alsa: Jack detection kcontrol implementation
[platform/upstream/pulseaudio.git] src/modules/alsa/alsa-source.c
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, write to the Free Software
19   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20   USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/rtclock.h>
32 #include <pulse/timeval.h>
33 #include <pulse/volume.h>
34 #include <pulse/xmalloc.h>
35
36 #include <pulsecore/core.h>
37 #include <pulsecore/i18n.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-rtclock.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/thread.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/time-smoother.h>
51
52 #include <modules/reserve-wrap.h>
53
54 #include "alsa-util.h"
55 #include "alsa-source.h"
56
57 /* #define DEBUG_TIMING */
58
59 #define DEFAULT_DEVICE "default"
60
61 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
62 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
63
64 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
65 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
66 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
67 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
68 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
69 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
70
71 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
72 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
73
74 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
75 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
76
77 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
78 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
79
80 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
81
82 struct userdata {
83     pa_core *core;
84     pa_module *module;
85     pa_source *source;
86
87     pa_thread *thread;
88     pa_thread_mq thread_mq;
89     pa_rtpoll *rtpoll;
90
91     snd_pcm_t *pcm_handle;
92
93     char *paths_dir;
94     pa_alsa_fdlist *mixer_fdl;
95     pa_alsa_mixer_pdata *mixer_pd;
96     snd_mixer_t *mixer_handle;
97     pa_alsa_path_set *mixer_path_set;
98     pa_alsa_path *mixer_path;
99
100     pa_cvolume hardware_volume;
101
102     unsigned int *rates;
103
104     size_t
105         frame_size,
106         fragment_size,
107         hwbuf_size,
108         tsched_watermark,
109         tsched_watermark_ref,
110         hwbuf_unused,
111         min_sleep,
112         min_wakeup,
113         watermark_inc_step,
114         watermark_dec_step,
115         watermark_inc_threshold,
116         watermark_dec_threshold;
117
118     pa_usec_t watermark_dec_not_before;
119     pa_usec_t min_latency_ref;
120
121     char *device_name;  /* name of the PCM device */
122     char *control_device; /* name of the control device */
123
124     pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
125
126     pa_bool_t first;
127
128     pa_rtpoll_item *alsa_rtpoll_item;
129
130     snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
131
132     pa_smoother *smoother;
133     uint64_t read_count;
134     pa_usec_t smoother_interval;
135     pa_usec_t last_smoother_update;
136
137     pa_reserve_wrapper *reserve;
138     pa_hook_slot *reserve_slot;
139     pa_reserve_monitor_wrapper *monitor;
140     pa_hook_slot *monitor_slot;
141 };
142
143 static void userdata_free(struct userdata *u);
144
145 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
146     pa_assert(r);
147     pa_assert(u);
148
149     if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
150         return PA_HOOK_CANCEL;
151
152     return PA_HOOK_OK;
153 }
154
155 static void reserve_done(struct userdata *u) {
156     pa_assert(u);
157
158     if (u->reserve_slot) {
159         pa_hook_slot_free(u->reserve_slot);
160         u->reserve_slot = NULL;
161     }
162
163     if (u->reserve) {
164         pa_reserve_wrapper_unref(u->reserve);
165         u->reserve = NULL;
166     }
167 }
168
169 static void reserve_update(struct userdata *u) {
170     const char *description;
171     pa_assert(u);
172
173     if (!u->source || !u->reserve)
174         return;
175
176     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
177         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
178 }
179
180 static int reserve_init(struct userdata *u, const char *dname) {
181     char *rname;
182
183     pa_assert(u);
184     pa_assert(dname);
185
186     if (u->reserve)
187         return 0;
188
189     if (pa_in_system_mode())
190         return 0;
191
192     if (!(rname = pa_alsa_get_reserve_name(dname)))
193         return 0;
194
195     /* We are resuming, try to lock the device */
196     u->reserve = pa_reserve_wrapper_get(u->core, rname);
197     pa_xfree(rname);
198
199     if (!(u->reserve))
200         return -1;
201
202     reserve_update(u);
203
204     pa_assert(!u->reserve_slot);
205     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
206
207     return 0;
208 }
209
210 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
211     pa_bool_t b;
212
213     pa_assert(w);
214     pa_assert(u);
215
216     b = PA_PTR_TO_UINT(busy) && !u->reserve;
217
218     pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
219     return PA_HOOK_OK;
220 }
221
222 static void monitor_done(struct userdata *u) {
223     pa_assert(u);
224
225     if (u->monitor_slot) {
226         pa_hook_slot_free(u->monitor_slot);
227         u->monitor_slot = NULL;
228     }
229
230     if (u->monitor) {
231         pa_reserve_monitor_wrapper_unref(u->monitor);
232         u->monitor = NULL;
233     }
234 }
235
236 static int reserve_monitor_init(struct userdata *u, const char *dname) {
237     char *rname;
238
239     pa_assert(u);
240     pa_assert(dname);
241
242     if (pa_in_system_mode())
243         return 0;
244
245     if (!(rname = pa_alsa_get_reserve_name(dname)))
246         return 0;
247
248     /* We are resuming, try to lock the device */
249     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
250     pa_xfree(rname);
251
252     if (!(u->monitor))
253         return -1;
254
255     pa_assert(!u->monitor_slot);
256     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
257
258     return 0;
259 }
260
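/* Translate the tsched minimum sleep/wakeup constants into bytes and clamp
 * them to at most half of the usable part of the hardware buffer. */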
261 static void fix_min_sleep_wakeup(struct userdata *u) {
262     size_t max_use, max_use_2;
263
264     pa_assert(u);
265     pa_assert(u->use_tsched);
266
267     max_use = u->hwbuf_size - u->hwbuf_unused;
268     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
269
270     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
271     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
272
273     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
274     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
275 }
276
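/* Clamp the wakeup watermark between min_wakeup and the usable buffer size minus min_sleep. */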
277 static void fix_tsched_watermark(struct userdata *u) {
278     size_t max_use;
279     pa_assert(u);
280     pa_assert(u->use_tsched);
281
282     max_use = u->hwbuf_size - u->hwbuf_unused;
283
284     if (u->tsched_watermark > max_use - u->min_sleep)
285         u->tsched_watermark = max_use - u->min_sleep;
286
287     if (u->tsched_watermark < u->min_wakeup)
288         u->tsched_watermark = u->min_wakeup;
289 }
290
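/* Called when we got too close to an overrun: bump the wakeup watermark, and if it is
 * already maxed out, raise the source's minimum latency instead, unless a fixed
 * latency range was configured. */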
291 static void increase_watermark(struct userdata *u) {
292     size_t old_watermark;
293     pa_usec_t old_min_latency, new_min_latency;
294
295     pa_assert(u);
296     pa_assert(u->use_tsched);
297
298     /* First, just try to increase the watermark */
299     old_watermark = u->tsched_watermark;
300     u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
301     fix_tsched_watermark(u);
302
303     if (old_watermark != u->tsched_watermark) {
304         pa_log_info("Increasing wakeup watermark to %0.2f ms",
305                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
306         return;
307     }
308
309     /* Hmm, we cannot increase the watermark any further, hence let's
310      raise the latency unless doing so was disabled in
311      configuration */
312     if (u->fixed_latency_range)
313         return;
314
315     old_min_latency = u->source->thread_info.min_latency;
316     new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
317     new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
318
319     if (old_min_latency != new_min_latency) {
320         pa_log_info("Increasing minimal latency to %0.2f ms",
321                     (double) new_min_latency / PA_USEC_PER_MSEC);
322
323         pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
324     }
325
326     /* When we reach this we're officially fucked! */
327 }
328
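/* Called when we have stayed comfortably above the decrease threshold: lower the wakeup
 * watermark again, but not more often than every TSCHED_WATERMARK_VERIFY_AFTER_USEC. */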
329 static void decrease_watermark(struct userdata *u) {
330     size_t old_watermark;
331     pa_usec_t now;
332
333     pa_assert(u);
334     pa_assert(u->use_tsched);
335
336     now = pa_rtclock_now();
337
338     if (u->watermark_dec_not_before <= 0)
339         goto restart;
340
341     if (u->watermark_dec_not_before > now)
342         return;
343
344     old_watermark = u->tsched_watermark;
345
346     if (u->tsched_watermark < u->watermark_dec_step)
347         u->tsched_watermark = u->tsched_watermark / 2;
348     else
349         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
350
351     fix_tsched_watermark(u);
352
353     if (old_watermark != u->tsched_watermark)
354         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
355                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
356
357     /* We don't change the latency range */
358
359 restart:
360     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
361 }
362
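/* Split the currently requested latency into the time we may sleep and the
 * processing headroom we reserve (the watermark). */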
363 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
364     pa_usec_t wm, usec;
365
366     pa_assert(sleep_usec);
367     pa_assert(process_usec);
368
369     pa_assert(u);
370     pa_assert(u->use_tsched);
371
372     usec = pa_source_get_requested_latency_within_thread(u->source);
373
374     if (usec == (pa_usec_t) -1)
375         usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
376
377     wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
378
379     if (wm > usec)
380         wm = usec/2;
381
382     *sleep_usec = usec - wm;
383     *process_usec = wm;
384
385 #ifdef DEBUG_TIMING
386     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
387                  (unsigned long) (usec / PA_USEC_PER_MSEC),
388                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
389                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
390 #endif
391 }
392
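/* Log and recover from PCM errors (overrun, system suspend) via snd_pcm_recover(),
 * and mark the stream so that capture is restarted. */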
393 static int try_recover(struct userdata *u, const char *call, int err) {
394     pa_assert(u);
395     pa_assert(call);
396     pa_assert(err < 0);
397
398     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
399
400     pa_assert(err != -EAGAIN);
401
402     if (err == -EPIPE)
403         pa_log_debug("%s: Buffer overrun!", call);
404
405     if (err == -ESTRPIPE)
406         pa_log_debug("%s: System suspended!", call);
407
408     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
409         pa_log("%s: %s", call, pa_alsa_strerror(err));
410         return -1;
411     }
412
413     u->first = TRUE;
414     return 0;
415 }
416
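/* Given the number of bytes ALSA reports as available, work out how much room is left
 * in the hardware buffer and adjust the watermark when timer-based scheduling is used. */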
417 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
418     size_t left_to_record;
419     size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
420     pa_bool_t overrun = FALSE;
421
422     /* We use <= instead of < for this check here because an overrun
423      * only happens after the last sample was processed, not merely when
424      * it is removed from the buffer. This is particularly important
425      * when block transfer is used. */
426
427     if (n_bytes <= rec_space)
428         left_to_record = rec_space - n_bytes;
429     else {
430
431         /* We got a dropout. What a mess! */
432         left_to_record = 0;
433         overrun = TRUE;
434
435 #ifdef DEBUG_TIMING
436         PA_DEBUG_TRAP;
437 #endif
438
439         if (pa_log_ratelimit(PA_LOG_INFO))
440             pa_log_info("Overrun!");
441     }
442
443 #ifdef DEBUG_TIMING
444     pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
445 #endif
446
447     if (u->use_tsched) {
448         pa_bool_t reset_not_before = TRUE;
449
450         if (overrun || left_to_record < u->watermark_inc_threshold)
451             increase_watermark(u);
452         else if (left_to_record > u->watermark_dec_threshold) {
453             reset_not_before = FALSE;
454
455             /* We decrease the watermark only if we have actually
456              * been woken up by a timeout. If something else woke
457              * us up it's too easy to fulfill the deadlines... */
458
459             if (on_timeout)
460                 decrease_watermark(u);
461         }
462
463         if (reset_not_before)
464             u->watermark_dec_not_before = 0;
465     }
466
467     return left_to_record;
468 }
469
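/* Capture loop for the mmap access mode: map the available hardware buffer area, post it
 * to the source as zero-copy memchunks, commit the frames back to ALSA and compute the
 * next sleep time when timer-based scheduling is used. */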
470 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
471     pa_bool_t work_done = FALSE;
472     pa_usec_t max_sleep_usec = 0, process_usec = 0;
473     size_t left_to_record;
474     unsigned j = 0;
475
476     pa_assert(u);
477     pa_source_assert_ref(u->source);
478
479     if (u->use_tsched)
480         hw_sleep_time(u, &max_sleep_usec, &process_usec);
481
482     for (;;) {
483         snd_pcm_sframes_t n;
484         size_t n_bytes;
485         int r;
486         pa_bool_t after_avail = TRUE;
487
488         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
489
490             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
491                 continue;
492
493             return r;
494         }
495
496         n_bytes = (size_t) n * u->frame_size;
497
498 #ifdef DEBUG_TIMING
499         pa_log_debug("avail: %lu", (unsigned long) n_bytes);
500 #endif
501
502         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
503         on_timeout = FALSE;
504
505         if (u->use_tsched)
506             if (!polled &&
507                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
508 #ifdef DEBUG_TIMING
509                 pa_log_debug("Not reading, because too early.");
510 #endif
511                 break;
512             }
513
514         if (PA_UNLIKELY(n_bytes <= 0)) {
515
516             if (polled)
517                 PA_ONCE_BEGIN {
518                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
519                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
520                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
521                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
522                            pa_strnull(dn));
523                     pa_xfree(dn);
524                 } PA_ONCE_END;
525
526 #ifdef DEBUG_TIMING
527             pa_log_debug("Not reading, because not necessary.");
528 #endif
529             break;
530         }
531
532
533         if (++j > 10) {
534 #ifdef DEBUG_TIMING
535             pa_log_debug("Not filling up, because already too many iterations.");
536 #endif
537
538             break;
539         }
540
541         polled = FALSE;
542
543 #ifdef DEBUG_TIMING
544         pa_log_debug("Reading");
545 #endif
546
547         for (;;) {
548             pa_memchunk chunk;
549             void *p;
550             int err;
551             const snd_pcm_channel_area_t *areas;
552             snd_pcm_uframes_t offset, frames;
553             snd_pcm_sframes_t sframes;
554
555             frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
556 /*             pa_log_debug("%lu frames to read", (unsigned long) frames); */
557
558             if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
559
560                 if (!after_avail && err == -EAGAIN)
561                     break;
562
563                 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
564                     continue;
565
566                 return r;
567             }
568
569             /* Make sure that if these memblocks need to be copied they will fit into one slot */
570             if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
571                 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
572
573             if (!after_avail && frames == 0)
574                 break;
575
576             pa_assert(frames > 0);
577             after_avail = FALSE;
578
579             /* Check these are multiples of 8 bit */
580             pa_assert((areas[0].first & 7) == 0);
581             pa_assert((areas[0].step & 7) == 0);
582
583             /* We assume a single interleaved memory buffer */
584             pa_assert((areas[0].first >> 3) == 0);
585             pa_assert((areas[0].step >> 3) == u->frame_size);
586
587             p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
588
589             chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
590             chunk.length = pa_memblock_get_length(chunk.memblock);
591             chunk.index = 0;
592
593             pa_source_post(u->source, &chunk);
594             pa_memblock_unref_fixed(chunk.memblock);
595
596             if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
597
598                 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
599                     continue;
600
601                 return r;
602             }
603
604             work_done = TRUE;
605
606             u->read_count += frames * u->frame_size;
607
608 #ifdef DEBUG_TIMING
609             pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
610 #endif
611
612             if ((size_t) frames * u->frame_size >= n_bytes)
613                 break;
614
615             n_bytes -= (size_t) frames * u->frame_size;
616         }
617     }
618
619     if (u->use_tsched) {
620         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
621         process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
622
623         if (*sleep_usec > process_usec)
624             *sleep_usec -= process_usec;
625         else
626             *sleep_usec = 0;
627     }
628
629     return work_done ? 1 : 0;
630 }
631
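/* Capture loop for the plain read access mode: read into newly allocated memblocks
 * with snd_pcm_readi() and post them to the source. */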
632 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
633     int work_done = FALSE;
634     pa_usec_t max_sleep_usec = 0, process_usec = 0;
635     size_t left_to_record;
636     unsigned j = 0;
637
638     pa_assert(u);
639     pa_source_assert_ref(u->source);
640
641     if (u->use_tsched)
642         hw_sleep_time(u, &max_sleep_usec, &process_usec);
643
644     for (;;) {
645         snd_pcm_sframes_t n;
646         size_t n_bytes;
647         int r;
648         pa_bool_t after_avail = TRUE;
649
650         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
651
652             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
653                 continue;
654
655             return r;
656         }
657
658         n_bytes = (size_t) n * u->frame_size;
659         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
660         on_timeout = FALSE;
661
662         if (u->use_tsched)
663             if (!polled &&
664                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
665                 break;
666
667         if (PA_UNLIKELY(n_bytes <= 0)) {
668
669             if (polled)
670                 PA_ONCE_BEGIN {
671                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
672                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
673                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
674                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
675                            pa_strnull(dn));
676                     pa_xfree(dn);
677                 } PA_ONCE_END;
678
679             break;
680         }
681
682         if (++j > 10) {
683 #ifdef DEBUG_TIMING
684             pa_log_debug("Not filling up, because already too many iterations.");
685 #endif
686
687             break;
688         }
689
690         polled = FALSE;
691
692         for (;;) {
693             void *p;
694             snd_pcm_sframes_t frames;
695             pa_memchunk chunk;
696
697             chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
698
699             frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
700
701             if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
702                 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
703
704 /*             pa_log_debug("%lu frames to read", (unsigned long) n); */
705
706             p = pa_memblock_acquire(chunk.memblock);
707             frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
708             pa_memblock_release(chunk.memblock);
709
710             if (PA_UNLIKELY(frames < 0)) {
711                 pa_memblock_unref(chunk.memblock);
712
713                 if (!after_avail && (int) frames == -EAGAIN)
714                     break;
715
716                 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
717                     continue;
718
719                 return r;
720             }
721
722             if (!after_avail && frames == 0) {
723                 pa_memblock_unref(chunk.memblock);
724                 break;
725             }
726
727             pa_assert(frames > 0);
728             after_avail = FALSE;
729
730             chunk.index = 0;
731             chunk.length = (size_t) frames * u->frame_size;
732
733             pa_source_post(u->source, &chunk);
734             pa_memblock_unref(chunk.memblock);
735
736             work_done = TRUE;
737
738             u->read_count += frames * u->frame_size;
739
740 /*             pa_log_debug("read %lu frames", (unsigned long) frames); */
741
742             if ((size_t) frames * u->frame_size >= n_bytes)
743                 break;
744
745             n_bytes -= (size_t) frames * u->frame_size;
746         }
747     }
748
749     if (u->use_tsched) {
750         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
751         process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
752
753         if (*sleep_usec > process_usec)
754             *sleep_usec -= process_usec;
755         else
756             *sleep_usec = 0;
757     }
758
759     return work_done ? 1 : 0;
760 }
761
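/* Feed the current sound-card time / stream position pair into the smoother;
 * the update interval grows exponentially up to SMOOTHER_MAX_INTERVAL. */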
762 static void update_smoother(struct userdata *u) {
763     snd_pcm_sframes_t delay = 0;
764     uint64_t position;
765     int err;
766     pa_usec_t now1 = 0, now2;
767     snd_pcm_status_t *status;
768
769     snd_pcm_status_alloca(&status);
770
771     pa_assert(u);
772     pa_assert(u->pcm_handle);
773
774     /* Let's update the time smoother */
775
776     if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
777         pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
778         return;
779     }
780
781     if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
782         pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
783     else {
784         snd_htimestamp_t htstamp = { 0, 0 };
785         snd_pcm_status_get_htstamp(status, &htstamp);
786         now1 = pa_timespec_load(&htstamp);
787     }
788
789     /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
790     if (now1 <= 0)
791         now1 = pa_rtclock_now();
792
793     /* check if the time since the last update is bigger than the interval */
794     if (u->last_smoother_update > 0)
795         if (u->last_smoother_update + u->smoother_interval > now1)
796             return;
797
798     position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
799     now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
800
801     pa_smoother_put(u->smoother, now1, now2);
802
803     u->last_smoother_update = now1;
804     /* exponentially increase the update interval up to the MAX limit */
805     u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
806 }
807
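/* Estimate the capture latency as the difference between the smoothed hardware
 * stream time and what we have actually read so far. */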
808 static pa_usec_t source_get_latency(struct userdata *u) {
809     int64_t delay;
810     pa_usec_t now1, now2;
811
812     pa_assert(u);
813
814     now1 = pa_rtclock_now();
815     now2 = pa_smoother_get(u->smoother, now1);
816
817     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
818
819     return delay >= 0 ? (pa_usec_t) delay : 0;
820 }
821
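/* (Re)create the rtpoll item carrying the PCM's poll descriptors. */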
822 static int build_pollfd(struct userdata *u) {
823     pa_assert(u);
824     pa_assert(u->pcm_handle);
825
826     if (u->alsa_rtpoll_item)
827         pa_rtpoll_item_free(u->alsa_rtpoll_item);
828
829     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
830         return -1;
831
832     return 0;
833 }
834
835 /* Called from IO context */
836 static int suspend(struct userdata *u) {
837     pa_assert(u);
838     pa_assert(u->pcm_handle);
839
840     pa_smoother_pause(u->smoother, pa_rtclock_now());
841
842     /* Let's suspend */
843     snd_pcm_close(u->pcm_handle);
844     u->pcm_handle = NULL;
845
846     if (u->alsa_rtpoll_item) {
847         pa_rtpoll_item_free(u->alsa_rtpoll_item);
848         u->alsa_rtpoll_item = NULL;
849     }
850
851     pa_log_info("Device suspended...");
852
853     return 0;
854 }
855
856 /* Called from IO context */
857 static int update_sw_params(struct userdata *u) {
858     snd_pcm_uframes_t avail_min;
859     int err;
860
861     pa_assert(u);
862
863     /* Use the full buffer if no one asked us for anything specific */
864     u->hwbuf_unused = 0;
865
866     if (u->use_tsched) {
867         pa_usec_t latency;
868
869         if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
870             size_t b;
871
872             pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
873
874             b = pa_usec_to_bytes(latency, &u->source->sample_spec);
875
876             /* We need at least one sample in our buffer */
877
878             if (PA_UNLIKELY(b < u->frame_size))
879                 b = u->frame_size;
880
881             u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
882         }
883
884         fix_min_sleep_wakeup(u);
885         fix_tsched_watermark(u);
886     }
887
888     pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
889
890     avail_min = 1;
891
892     if (u->use_tsched) {
893         pa_usec_t sleep_usec, process_usec;
894
895         hw_sleep_time(u, &sleep_usec, &process_usec);
896         avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
897     }
898
899     pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
900
901     if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
902         pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
903         return err;
904     }
905
906     return 0;
907 }
908
909 /* Called from IO Context on unsuspend or from main thread when creating source */
910 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
911                             pa_bool_t in_thread)
912 {
913     u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
914                                                     &u->source->sample_spec);
915
916     u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
917     u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
918
919     u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
920     u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
921
922     fix_min_sleep_wakeup(u);
923     fix_tsched_watermark(u);
924
925     if (in_thread)
926         pa_source_set_latency_range_within_thread(u->source,
927                                                   u->min_latency_ref,
928                                                   pa_bytes_to_usec(u->hwbuf_size, ss));
929     else {
930         pa_source_set_latency_range(u->source,
931                                     0,
932                                     pa_bytes_to_usec(u->hwbuf_size, ss));
933
934         /* Work around the assert in pa_source_set_latency_range_within_thread():
935            keep track of min_latency and reuse it when
936            this routine is called from IO context */
937         u->min_latency_ref = u->source->thread_info.min_latency;
938     }
939
940     pa_log_info("Time scheduling watermark is %0.2fms",
941                 (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
942 }
943
944 /* Called from IO context */
945 static int unsuspend(struct userdata *u) {
946     pa_sample_spec ss;
947     int err;
948     pa_bool_t b, d;
949     snd_pcm_uframes_t period_size, buffer_size;
950
951     pa_assert(u);
952     pa_assert(!u->pcm_handle);
953
954     pa_log_info("Trying resume...");
955
956     if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
957                             SND_PCM_NONBLOCK|
958                             SND_PCM_NO_AUTO_RESAMPLE|
959                             SND_PCM_NO_AUTO_CHANNELS|
960                             SND_PCM_NO_AUTO_FORMAT)) < 0) {
961         pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
962         goto fail;
963     }
964
965     ss = u->source->sample_spec;
966     period_size = u->fragment_size / u->frame_size;
967     buffer_size = u->hwbuf_size / u->frame_size;
968     b = u->use_mmap;
969     d = u->use_tsched;
970
971     if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
972         pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
973         goto fail;
974     }
975
976     if (b != u->use_mmap || d != u->use_tsched) {
977         pa_log_warn("Resume failed, couldn't get original access mode.");
978         goto fail;
979     }
980
981     if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
982         pa_log_warn("Resume failed, couldn't restore original sample settings.");
983         goto fail;
984     }
985
986     if (period_size*u->frame_size != u->fragment_size ||
987         buffer_size*u->frame_size != u->hwbuf_size) {
988         pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
989                     (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
990                     (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
991         goto fail;
992     }
993
994     if (update_sw_params(u) < 0)
995         goto fail;
996
997     if (build_pollfd(u) < 0)
998         goto fail;
999
1000     /* FIXME: We need to reload the volume somehow */
1001
1002     u->read_count = 0;
1003     pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1004     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1005     u->last_smoother_update = 0;
1006
1007     u->first = TRUE;
1008
1009     /* reset the watermark to the value defined when the source was created */
1010     if (u->use_tsched)
1011         reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, TRUE);
1012
1013     pa_log_info("Resumed successfully...");
1014
1015     return 0;
1016
1017 fail:
1018     if (u->pcm_handle) {
1019         snd_pcm_close(u->pcm_handle);
1020         u->pcm_handle = NULL;
1021     }
1022
1023     return -PA_ERR_IO;
1024 }
1025
1026 /* Called from IO context */
1027 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1028     struct userdata *u = PA_SOURCE(o)->userdata;
1029
1030     switch (code) {
1031
1032         case PA_SOURCE_MESSAGE_GET_LATENCY: {
1033             pa_usec_t r = 0;
1034
1035             if (u->pcm_handle)
1036                 r = source_get_latency(u);
1037
1038             *((pa_usec_t*) data) = r;
1039
1040             return 0;
1041         }
1042
1043         case PA_SOURCE_MESSAGE_SET_STATE:
1044
1045             switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
1046
1047                 case PA_SOURCE_SUSPENDED: {
1048                     int r;
1049
1050                     pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1051
1052                     if ((r = suspend(u)) < 0)
1053                         return r;
1054
1055                     break;
1056                 }
1057
1058                 case PA_SOURCE_IDLE:
1059                 case PA_SOURCE_RUNNING: {
1060                     int r;
1061
1062                     if (u->source->thread_info.state == PA_SOURCE_INIT) {
1063                         if (build_pollfd(u) < 0)
1064                             return -PA_ERR_IO;
1065                     }
1066
1067                     if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1068                         if ((r = unsuspend(u)) < 0)
1069                             return r;
1070                     }
1071
1072                     break;
1073                 }
1074
1075                 case PA_SOURCE_UNLINKED:
1076                 case PA_SOURCE_INIT:
1077                 case PA_SOURCE_INVALID_STATE:
1078                     ;
1079             }
1080
1081             break;
1082     }
1083
1084     return pa_source_process_msg(o, code, data, offset, chunk);
1085 }
1086
1087 /* Called from main context */
1088 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1089     pa_source_state_t old_state;
1090     struct userdata *u;
1091
1092     pa_source_assert_ref(s);
1093     pa_assert_se(u = s->userdata);
1094
1095     old_state = pa_source_get_state(u->source);
1096
1097     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1098         reserve_done(u);
1099     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1100         if (reserve_init(u, u->device_name) < 0)
1101             return -PA_ERR_BUSY;
1102
1103     return 0;
1104 }
1105
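/* Mixer element callback, main-loop variant (registered via the fdlist): re-read the
 * hardware volume and mute when a mixer element changes value. */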
1106 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1107     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1108
1109     pa_assert(u);
1110     pa_assert(u->mixer_handle);
1111
1112     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1113         return 0;
1114
1115     if (!PA_SOURCE_IS_LINKED(u->source->state))
1116         return 0;
1117
1118     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1119         return 0;
1120
1121     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1122         pa_source_get_volume(u->source, TRUE);
1123         pa_source_get_mute(u->source, TRUE);
1124     }
1125
1126     return 0;
1127 }
1128
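/* Mixer element callback, IO-thread variant used with deferred volume. */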
1129 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1130     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1131
1132     pa_assert(u);
1133     pa_assert(u->mixer_handle);
1134
1135     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1136         return 0;
1137
1138     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1139         return 0;
1140
1141     if (mask & SND_CTL_EVENT_MASK_VALUE)
1142         pa_source_update_volume_and_mute(u->source);
1143
1144     return 0;
1145 }
1146
1147 static void source_get_volume_cb(pa_source *s) {
1148     struct userdata *u = s->userdata;
1149     pa_cvolume r;
1150     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1151
1152     pa_assert(u);
1153     pa_assert(u->mixer_path);
1154     pa_assert(u->mixer_handle);
1155
1156     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1157         return;
1158
1159     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1160     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1161
1162     pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1163
1164     if (u->mixer_path->has_dB) {
1165         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1166
1167         pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1168     }
1169
1170     if (pa_cvolume_equal(&u->hardware_volume, &r))
1171         return;
1172
1173     s->real_volume = u->hardware_volume = r;
1174
1175     /* Hmm, so the hardware volume changed, let's reset our software volume */
1176     if (u->mixer_path->has_dB)
1177         pa_source_set_soft_volume(s, NULL);
1178 }
1179
1180 static void source_set_volume_cb(pa_source *s) {
1181     struct userdata *u = s->userdata;
1182     pa_cvolume r;
1183     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1184     pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1185
1186     pa_assert(u);
1187     pa_assert(u->mixer_path);
1188     pa_assert(u->mixer_handle);
1189
1190     /* Shift up by the base volume */
1191     pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1192
1193     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1194         return;
1195
1196     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1197     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1198
1199     u->hardware_volume = r;
1200
1201     if (u->mixer_path->has_dB) {
1202         pa_cvolume new_soft_volume;
1203         pa_bool_t accurate_enough;
1204         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1205
1206         /* Match exactly what the user requested by software */
1207         pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1208
1209         /* If the adjustment to do in software is only minimal we
1210          * can skip it. That saves us CPU at the expense of a bit of
1211          * accuracy */
1212         accurate_enough =
1213             (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1214             (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1215
1216         pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1217         pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1218         pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1219         pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1220         pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1221                      pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1222                      pa_yes_no(accurate_enough));
1223         pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1224
1225         if (!accurate_enough)
1226             s->soft_volume = new_soft_volume;
1227
1228     } else {
1229         pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1230
1231         /* We can't match exactly what the user requested, hence let's
1232          * at least tell the user about it */
1233
1234         s->real_volume = r;
1235     }
1236 }
1237
1238 static void source_write_volume_cb(pa_source *s) {
1239     struct userdata *u = s->userdata;
1240     pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1241
1242     pa_assert(u);
1243     pa_assert(u->mixer_path);
1244     pa_assert(u->mixer_handle);
1245     pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1246
1247     /* Shift up by the base volume */
1248     pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1249
1250     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1251         pa_log_error("Writing HW volume failed");
1252     else {
1253         pa_cvolume tmp_vol;
1254         pa_bool_t accurate_enough;
1255
1256         /* Shift down by the base volume, so that 0dB becomes maximum volume */
1257         pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1258
1259         pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1260         accurate_enough =
1261             (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1262             (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1263
1264         if (!accurate_enough) {
1265             union {
1266                 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1267                 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1268             } vol;
1269
1270             pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1271                          pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1272                          pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1273             pa_log_debug("                                           in dB: %s (request) != %s",
1274                          pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1275                          pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1276         }
1277     }
1278 }
1279
1280 static void source_get_mute_cb(pa_source *s) {
1281     struct userdata *u = s->userdata;
1282     pa_bool_t b;
1283
1284     pa_assert(u);
1285     pa_assert(u->mixer_path);
1286     pa_assert(u->mixer_handle);
1287
1288     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1289         return;
1290
1291     s->muted = b;
1292 }
1293
1294 static void source_set_mute_cb(pa_source *s) {
1295     struct userdata *u = s->userdata;
1296
1297     pa_assert(u);
1298     pa_assert(u->mixer_path);
1299     pa_assert(u->mixer_handle);
1300
1301     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1302 }
1303
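/* Install or clear the hardware volume/mute callbacks depending on the
 * capabilities of the selected mixer path. */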
1304 static void mixer_volume_init(struct userdata *u) {
1305     pa_assert(u);
1306
1307     if (!u->mixer_path->has_volume) {
1308         pa_source_set_write_volume_callback(u->source, NULL);
1309         pa_source_set_get_volume_callback(u->source, NULL);
1310         pa_source_set_set_volume_callback(u->source, NULL);
1311
1312         pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1313     } else {
1314         pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1315         pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1316
1317         if (u->mixer_path->has_dB && u->deferred_volume) {
1318             pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1319             pa_log_info("Successfully enabled deferred volume.");
1320         } else
1321             pa_source_set_write_volume_callback(u->source, NULL);
1322
1323         if (u->mixer_path->has_dB) {
1324             pa_source_enable_decibel_volume(u->source, TRUE);
1325             pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1326
1327             u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1328             u->source->n_volume_steps = PA_VOLUME_NORM+1;
1329
1330             pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1331         } else {
1332             pa_source_enable_decibel_volume(u->source, FALSE);
1333             pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1334
1335             u->source->base_volume = PA_VOLUME_NORM;
1336             u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1337         }
1338
1339         pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1340     }
1341
1342     if (!u->mixer_path->has_mute) {
1343         pa_source_set_get_mute_callback(u->source, NULL);
1344         pa_source_set_set_mute_callback(u->source, NULL);
1345         pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1346     } else {
1347         pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1348         pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1349         pa_log_info("Using hardware mute control.");
1350     }
1351 }
1352
1353 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1354     struct userdata *u = s->userdata;
1355     pa_alsa_port_data *data;
1356
1357     pa_assert(u);
1358     pa_assert(p);
1359     pa_assert(u->mixer_handle);
1360
1361     data = PA_DEVICE_PORT_DATA(p);
1362
1363     pa_assert_se(u->mixer_path = data->path);
1364     pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1365
1366     mixer_volume_init(u);
1367
1368     if (data->setting)
1369         pa_alsa_setting_select(data->setting, u->mixer_handle);
1370
1371     if (s->set_mute)
1372         s->set_mute(s);
1373     if (s->set_volume)
1374         s->set_volume(s);
1375
1376     return 0;
1377 }
1378
1379 static void source_update_requested_latency_cb(pa_source *s) {
1380     struct userdata *u = s->userdata;
1381     pa_assert(u);
1382     pa_assert(u->use_tsched); /* only when timer scheduling is used
1383                                * can we dynamically adjust the
1384                                * latency */
1385
1386     if (!u->pcm_handle)
1387         return;
1388
1389     update_sw_params(u);
1390 }
1391
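/* Accept an alternate sample rate only if it is in the supported rate list and
 * the source is not currently opened. */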
1392 static pa_bool_t source_update_rate_cb(pa_source *s, uint32_t rate)
1393 {
1394     struct userdata *u = s->userdata;
1395     int i;
1396     pa_bool_t supported = FALSE;
1397
1398     pa_assert(u);
1399
1400     for (i = 0; u->rates[i]; i++) {
1401         if (u->rates[i] == rate) {
1402             supported = TRUE;
1403             break;
1404         }
1405     }
1406
1407     if (!supported) {
1408         pa_log_info("Sink does not support sample rate of %d Hz", rate);
1409         return FALSE;
1410     }
1411
1412     if (!PA_SOURCE_IS_OPENED(s->state)) {
1413         pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1414         u->source->sample_spec.rate = rate;
1415         return TRUE;
1416     }
1417
1418     return FALSE;
1419 }
1420
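/* The IO thread: start capture, run mmap_read()/unix_read(), program the rtpoll
 * timer and handle ALSA poll events and error recovery. */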
1421 static void thread_func(void *userdata) {
1422     struct userdata *u = userdata;
1423     unsigned short revents = 0;
1424
1425     pa_assert(u);
1426
1427     pa_log_debug("Thread starting up");
1428
1429     if (u->core->realtime_scheduling)
1430         pa_make_realtime(u->core->realtime_priority);
1431
1432     pa_thread_mq_install(&u->thread_mq);
1433
1434     for (;;) {
1435         int ret;
1436         pa_usec_t rtpoll_sleep = 0;
1437
1438 #ifdef DEBUG_TIMING
1439         pa_log_debug("Loop");
1440 #endif
1441
1442         /* Read some data and pass it to the sources */
1443         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1444             int work_done;
1445             pa_usec_t sleep_usec = 0;
1446             pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1447
1448             if (u->first) {
1449                 pa_log_info("Starting capture.");
1450                 snd_pcm_start(u->pcm_handle);
1451
1452                 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1453
1454                 u->first = FALSE;
1455             }
1456
1457             if (u->use_mmap)
1458                 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1459             else
1460                 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1461
1462             if (work_done < 0)
1463                 goto fail;
1464
1465 /*             pa_log_debug("work_done = %i", work_done); */
1466
1467             if (work_done)
1468                 update_smoother(u);
1469
1470             if (u->use_tsched) {
1471                 pa_usec_t cusec;
1472
1473                 /* OK, the capture buffer is now empty, let's
1474                  * calculate when to wake up next */
1475
1476 /*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1477
1478                 /* Convert from the sound card time domain to the
1479                  * system time domain */
1480                 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1481
1482 /*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1483
1484                 /* We don't trust the conversion, so we wake up at whichever time comes first */
1485                 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1486             }
1487         }
1488
1489         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1490             pa_usec_t volume_sleep;
1491             pa_source_volume_change_apply(u->source, &volume_sleep);
1492             if (volume_sleep > 0) {
1493                 if (rtpoll_sleep > 0)
1494                     rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1495                 else
1496                     rtpoll_sleep = volume_sleep;
1497             }
1498         }
1499
1500         if (rtpoll_sleep > 0)
1501             pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1502         else
1503             pa_rtpoll_set_timer_disabled(u->rtpoll);
1504
1505         /* Hmm, nothing to do. Let's sleep */
1506         if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1507             goto fail;
1508
1509         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
1510             pa_source_volume_change_apply(u->source, NULL);
1511
1512         if (ret == 0)
1513             goto finish;
1514
1515         /* Tell ALSA about this and process its response */
1516         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1517             struct pollfd *pollfd;
1518             int err;
1519             unsigned n;
1520
1521             pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1522
1523             if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1524                 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1525                 goto fail;
1526             }
1527
1528             if (revents & ~POLLIN) {
1529                 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1530                     goto fail;
1531
1532                 u->first = TRUE;
1533                 revents = 0;
1534             } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1535                 pa_log_debug("Wakeup from ALSA!");
1536
1537         } else
1538             revents = 0;
1539     }
1540
1541 fail:
1542     /* If this was not a regular exit from the loop we have to continue
1543      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1544     pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1545     pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1546
1547 finish:
1548     pa_log_debug("Thread shutting down");
1549 }
1550
1551 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1552     const char *n;
1553     char *t;
1554
1555     pa_assert(data);
1556     pa_assert(ma);
1557     pa_assert(device_name);
1558
1559     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1560         pa_source_new_data_set_name(data, n);
1561         data->namereg_fail = TRUE;
1562         return;
1563     }
1564
1565     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1566         data->namereg_fail = TRUE;
1567     else {
1568         n = device_id ? device_id : device_name;
1569         data->namereg_fail = FALSE;
1570     }
1571
1572     if (mapping)
1573         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1574     else
1575         t = pa_sprintf_malloc("alsa_input.%s", n);
1576
1577     pa_source_new_data_set_name(data, t);
1578     pa_xfree(t);
1579 }
1580
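/* Open the mixer belonging to our PCM; either synthesize and probe a path for an
 * explicitly requested element or take the path set from the mapping. */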
1581 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1582     snd_hctl_t *hctl;
1583
1584     if (!mapping && !element)
1585         return;
1586
1587     if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1588         pa_log_info("Failed to find a working mixer device.");
1589         return;
1590     }
1591
1592     if (element) {
1593
1594         if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1595             goto fail;
1596
1597         if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1598             goto fail;
1599
1600         pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1601         pa_alsa_path_dump(u->mixer_path);
1602     } else if (!(u->mixer_path_set = mapping->input_path_set))
1603         goto fail;
1604
1605     return;
1606
1607 fail:
1608
1609     if (u->mixer_path) {
1610         pa_alsa_path_free(u->mixer_path);
1611         u->mixer_path = NULL;
1612     }
1613
1614     if (u->mixer_handle) {
1615         snd_mixer_close(u->mixer_handle);
1616         u->mixer_handle = NULL;
1617     }
1618 }
1619
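/* Activate the mixer path that matches the active port (or the first/only
 * available path) and, if the path can control volume or mute, register a
 * change callback: on the IO thread's rtpoll when deferred volume is used,
 * otherwise on the main loop via an fdlist. */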
1620 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1621     pa_bool_t need_mixer_callback = FALSE;
1622
1623     pa_assert(u);
1624
1625     if (!u->mixer_handle)
1626         return 0;
1627
1628     if (u->source->active_port) {
1629         pa_alsa_port_data *data;
1630
1631         /* We have a list of supported paths, so let's activate the
1632          * one that has been chosen as active */
1633
1634         data = PA_DEVICE_PORT_DATA(u->source->active_port);
1635         u->mixer_path = data->path;
1636
1637         pa_alsa_path_select(data->path, u->mixer_handle);
1638
1639         if (data->setting)
1640             pa_alsa_setting_select(data->setting, u->mixer_handle);
1641
1642     } else {
1643
1644         if (!u->mixer_path && u->mixer_path_set)
1645             u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1646
1647         if (u->mixer_path) {
1648             /* We only have a single path, so let's activate it */
1649
1650             pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1651
1652             if (u->mixer_path->settings)
1653                 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1654         } else
1655             return 0;
1656     }
1657
1658     mixer_volume_init(u);
1659
1660     /* Will we need to register callbacks? */
1661     if (u->mixer_path_set && u->mixer_path_set->paths) {
1662         pa_alsa_path *p;
1663         void *state;
1664
1665         PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1666             if (p->has_volume || p->has_mute)
1667                 need_mixer_callback = TRUE;
1668         }
1669     }
1670     else if (u->mixer_path)
1671         need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1672
1673     if (need_mixer_callback) {
1674         int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1675         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1676             u->mixer_pd = pa_alsa_mixer_pdata_new();
1677             mixer_callback = io_mixer_callback;
1678
1679             if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1680                 pa_log("Failed to initialize file descriptor monitoring");
1681                 return -1;
1682             }
1683         } else {
1684             u->mixer_fdl = pa_alsa_fdlist_new();
1685             mixer_callback = ctl_mixer_callback;
1686
1687             if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1688                 pa_log("Failed to initialize file descriptor monitoring");
1689                 return -1;
1690             }
1691         }
1692
1693         if (u->mixer_path_set)
1694             pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1695         else
1696             pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1697     }
1698
1699     return 0;
1700 }
1701
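/* Create a new ALSA source: parse the module arguments, open and configure
 * the PCM device (preferring mmap access and timer-based scheduling when the
 * hardware allows it), find and set up the mixer, create the pa_source and
 * start the IO thread. Illustrative module arguments (the names are the ones
 * parsed below, the values are examples only):
 *   device=hw:0 tsched=1 fragments=4 fragment_size=4800 */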
1702 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1703
1704     struct userdata *u = NULL;
1705     const char *dev_id = NULL;
1706     pa_sample_spec ss;
1707     uint32_t alternate_sample_rate;
1708     pa_channel_map map;
1709     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1710     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1711     size_t frame_size;
1712     pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, fixed_latency_range = FALSE;
1713     pa_source_new_data data;
1714     pa_alsa_profile_set *profile_set = NULL;
1715
1716     pa_assert(m);
1717     pa_assert(ma);
1718
1719     ss = m->core->default_sample_spec;
1720     map = m->core->default_channel_map;
1721     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1722         pa_log("Failed to parse sample specification and channel map");
1723         goto fail;
1724     }
1725
1726     alternate_sample_rate = m->core->alternate_sample_rate;
1727     if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1728         pa_log("Failed to parse alternate sample rate");
1729         goto fail;
1730     }
1731
1732     frame_size = pa_frame_size(&ss);
1733
1734     nfrags = m->core->default_n_fragments;
1735     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1736     if (frag_size <= 0)
1737         frag_size = (uint32_t) frame_size;
1738     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1739     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1740
1741     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1742         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1743         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1744         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1745         pa_log("Failed to parse buffer metrics");
1746         goto fail;
1747     }
1748
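    /* Derive the period/buffer geometry in frames from the byte values above.
     * Illustrative example (not guaranteed defaults): with S16LE stereo at
     * 48000 Hz the frame size is 4 bytes, so a 25 ms fragment is 1200 frames
     * = 4800 bytes, and 4 such fragments give a 19200 byte (100 ms) buffer. */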
1749     buffer_size = nfrags * frag_size;
1750
1751     period_frames = frag_size/frame_size;
1752     buffer_frames = buffer_size/frame_size;
1753     tsched_frames = tsched_size/frame_size;
1754
1755     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1756         pa_log("Failed to parse mmap argument.");
1757         goto fail;
1758     }
1759
1760     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1761         pa_log("Failed to parse tsched argument.");
1762         goto fail;
1763     }
1764
1765     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1766         pa_log("Failed to parse ignore_dB argument.");
1767         goto fail;
1768     }
1769
1770     deferred_volume = m->core->deferred_volume;
1771     if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1772         pa_log("Failed to parse deferred_volume argument.");
1773         goto fail;
1774     }
1775
1776     if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
1777         pa_log("Failed to parse fixed_latency_range argument.");
1778         goto fail;
1779     }
1780
1781     use_tsched = pa_alsa_may_tsched(use_tsched);
1782
1783     u = pa_xnew0(struct userdata, 1);
1784     u->core = m->core;
1785     u->module = m;
1786     u->use_mmap = use_mmap;
1787     u->use_tsched = use_tsched;
1788     u->deferred_volume = deferred_volume;
1789     u->fixed_latency_range = fixed_latency_range;
1790     u->first = TRUE;
1791     u->rtpoll = pa_rtpoll_new();
1792     pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1793
1794     u->smoother = pa_smoother_new(
1795             SMOOTHER_ADJUST_USEC,
1796             SMOOTHER_WINDOW_USEC,
1797             TRUE,
1798             TRUE,
1799             5,
1800             pa_rtclock_now(),
1801             TRUE);
1802     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1803
1804     dev_id = pa_modargs_get_value(
1805             ma, "device_id",
1806             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1807
1808     u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
1809
1810     if (reserve_init(u, dev_id) < 0)
1811         goto fail;
1812
1813     if (reserve_monitor_init(u, dev_id) < 0)
1814         goto fail;
1815
1816     b = use_mmap;
1817     d = use_tsched;
1818
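    /* Open the PCM in one of three ways: through an explicit mapping (e.g.
     * when created from a card profile), by probing the profiles behind a
     * device_id= argument, or directly by the device= string. */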
1819     if (mapping) {
1820
1821         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1822             pa_log("device_id= not set");
1823             goto fail;
1824         }
1825
1826         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1827                       dev_id,
1828                       &u->device_name,
1829                       &ss, &map,
1830                       SND_PCM_STREAM_CAPTURE,
1831                       &period_frames, &buffer_frames, tsched_frames,
1832                       &b, &d, mapping)))
1833             goto fail;
1834
1835     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1836
1837         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1838             goto fail;
1839
1840         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1841                       dev_id,
1842                       &u->device_name,
1843                       &ss, &map,
1844                       SND_PCM_STREAM_CAPTURE,
1845                       &period_frames, &buffer_frames, tsched_frames,
1846                       &b, &d, profile_set, &mapping)))
1847             goto fail;
1848
1849     } else {
1850
1851         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1852                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1853                       &u->device_name,
1854                       &ss, &map,
1855                       SND_PCM_STREAM_CAPTURE,
1856                       &period_frames, &buffer_frames, tsched_frames,
1857                       &b, &d, FALSE)))
1858             goto fail;
1859     }
1860
1861     pa_assert(u->device_name);
1862     pa_log_info("Successfully opened device %s.", u->device_name);
1863
1864     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1865         pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
1866         goto fail;
1867     }
1868
1869     if (mapping)
1870         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1871
1872     if (use_mmap && !b) {
1873         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1874         u->use_mmap = use_mmap = FALSE;
1875     }
1876
1877     if (use_tsched && (!b || !d)) {
1878         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1879         u->use_tsched = use_tsched = FALSE;
1880     }
1881
1882     if (u->use_mmap)
1883         pa_log_info("Successfully enabled mmap() mode.");
1884
1885     if (u->use_tsched) {
1886         pa_log_info("Successfully enabled timer-based scheduling mode.");
1887         if (u->fixed_latency_range)
1888             pa_log_info("Disabling latency range changes on overrun");
1889     }
1890
1891     u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
1892     if (!u->rates) {
1893         pa_log_error("Failed to find any supported sample rates.");
1894         goto fail;
1895     }
1896
1897     /* ALSA might tweak the sample spec, so recalculate the frame size */
1898     frame_size = pa_frame_size(&ss);
1899
1900     find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1901
1902     pa_source_new_data_init(&data);
1903     data.driver = driver;
1904     data.module = m;
1905     data.card = card;
1906     set_source_name(&data, ma, dev_id, u->device_name, mapping);
1907
1908     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1909      * variable instead of using &data.namereg_fail directly, because
1910      * data.namereg_fail is a bitfield and taking the address of a bitfield
1911      * variable is impossible. */
1912     namereg_fail = data.namereg_fail;
1913     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1914         pa_log("Failed to parse namereg_fail argument.");
1915         pa_source_new_data_done(&data);
1916         goto fail;
1917     }
1918     data.namereg_fail = namereg_fail;
1919
1920     pa_source_new_data_set_sample_spec(&data, &ss);
1921     pa_source_new_data_set_channel_map(&data, &map);
1922     pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
1923
1924     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1925     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1926     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1927     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1928     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1929
1930     if (mapping) {
1931         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1932         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1933     }
1934
1935     pa_alsa_init_description(data.proplist);
1936
1937     if (u->control_device)
1938         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1939
1940     if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1941         pa_log("Invalid properties");
1942         pa_source_new_data_done(&data);
1943         goto fail;
1944     }
1945
1946     if (u->mixer_path_set)
1947         pa_alsa_add_ports(&data.ports, u->mixer_path_set, card);
1948
1949     u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1950     pa_source_new_data_done(&data);
1951
1952     if (!u->source) {
1953         pa_log("Failed to create source object");
1954         goto fail;
1955     }
1956
1957     if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
1958                                  &u->source->thread_info.volume_change_safety_margin) < 0) {
1959         pa_log("Failed to parse deferred_volume_safety_margin parameter");
1960         goto fail;
1961     }
1962
1963     if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
1964                                  &u->source->thread_info.volume_change_extra_delay) < 0) {
1965         pa_log("Failed to parse deferred_volume_extra_delay parameter");
1966         goto fail;
1967     }
1968
1969     u->source->parent.process_msg = source_process_msg;
1970     if (u->use_tsched)
1971         u->source->update_requested_latency = source_update_requested_latency_cb;
1972     u->source->set_state = source_set_state_cb;
1973     u->source->set_port = source_set_port_cb;
1974     if (u->source->alternate_sample_rate)
1975         u->source->update_rate = source_update_rate_cb;
1976     u->source->userdata = u;
1977
1978     pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1979     pa_source_set_rtpoll(u->source, u->rtpoll);
1980
1981     u->frame_size = frame_size;
1982     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1983     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1984     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1985
1986     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1987                 (double) u->hwbuf_size / (double) u->fragment_size,
1988                 (long unsigned) u->fragment_size,
1989                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1990                 (long unsigned) u->hwbuf_size,
1991                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1992
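    /* With timer-based scheduling the watermark determines how early we wake
     * up before the buffer fills and the latency can be adjusted at runtime;
     * without it the latency is fixed to the full hardware buffer. */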
1993     if (u->use_tsched) {
1994         u->tsched_watermark_ref = tsched_watermark;
1995         reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
1996     }
1997     else
1998         pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1999
2000     reserve_update(u);
2001
2002     if (update_sw_params(u) < 0)
2003         goto fail;
2004
2005     if (setup_mixer(u, ignore_dB) < 0)
2006         goto fail;
2007
2008     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2009
2010     if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
2011         pa_log("Failed to create thread.");
2012         goto fail;
2013     }
2014
2015     /* Get initial mixer settings */
2016     if (data.volume_is_set) {
2017         if (u->source->set_volume)
2018             u->source->set_volume(u->source);
2019     } else {
2020         if (u->source->get_volume)
2021             u->source->get_volume(u->source);
2022     }
2023
2024     if (data.muted_is_set) {
2025         if (u->source->set_mute)
2026             u->source->set_mute(u->source);
2027     } else {
2028         if (u->source->get_mute)
2029             u->source->get_mute(u->source);
2030     }
2031
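    /* write_volume is only set up when deferred (hardware) volume is in use;
     * in that case push the initial volume/mute settings out to the device. */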
2032     if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
2033         u->source->write_volume(u->source);
2034
2035     pa_source_put(u->source);
2036
2037     if (profile_set)
2038         pa_alsa_profile_set_free(profile_set);
2039
2040     return u->source;
2041
2042 fail:
2043
2044     if (u)
2045         userdata_free(u);
2046
2047     if (profile_set)
2048         pa_alsa_profile_set_free(profile_set);
2049
2050     return NULL;
2051 }
2052
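/* Tear everything down in roughly the reverse order of construction: unlink
 * the source, shut down the IO thread, then release the rtpoll items, the PCM
 * and mixer handles, the smoother and the device reservation. */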
2053 static void userdata_free(struct userdata *u) {
2054     pa_assert(u);
2055
2056     if (u->source)
2057         pa_source_unlink(u->source);
2058
2059     if (u->thread) {
2060         pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2061         pa_thread_free(u->thread);
2062     }
2063
2064     pa_thread_mq_done(&u->thread_mq);
2065
2066     if (u->source)
2067         pa_source_unref(u->source);
2068
2069     if (u->mixer_pd)
2070         pa_alsa_mixer_pdata_free(u->mixer_pd);
2071
2072     if (u->alsa_rtpoll_item)
2073         pa_rtpoll_item_free(u->alsa_rtpoll_item);
2074
2075     if (u->rtpoll)
2076         pa_rtpoll_free(u->rtpoll);
2077
2078     if (u->pcm_handle) {
2079         snd_pcm_drop(u->pcm_handle);
2080         snd_pcm_close(u->pcm_handle);
2081     }
2082
2083     if (u->mixer_fdl)
2084         pa_alsa_fdlist_free(u->mixer_fdl);
2085
2086     if (u->mixer_path && !u->mixer_path_set)
2087         pa_alsa_path_free(u->mixer_path);
2088
2089     if (u->mixer_handle)
2090         snd_mixer_close(u->mixer_handle);
2091
2092     if (u->smoother)
2093         pa_smoother_free(u->smoother);
2094
2095     if (u->rates)
2096         pa_xfree(u->rates);
2097
2098     reserve_done(u);
2099     monitor_done(u);
2100
2101     pa_xfree(u->device_name);
2102     pa_xfree(u->control_device);
2103     pa_xfree(u->paths_dir);
2104     pa_xfree(u);
2105 }
2106
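/* Public destructor: recover the userdata from the source object and free
 * everything through userdata_free(). */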
2107 void pa_alsa_source_free(pa_source *s) {
2108     struct userdata *u;
2109
2110     pa_source_assert_ref(s);
2111     pa_assert_se(u = s->userdata);
2112
2113     userdata_free(u);
2114 }