alsa-sink/source: Warn for scheduling delays
[platform/upstream/pulseaudio.git] / src / modules / alsa / alsa-source.c
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, write to the Free Software
19   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20   USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/volume.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/thread-mq.h>
50 #include <pulsecore/rtpoll.h>
51 #include <pulsecore/time-smoother.h>
52
53 #include <modules/reserve-wrap.h>
54
55 #include "alsa-util.h"
56 #include "alsa-source.h"
57
58 /* #define DEBUG_TIMING */
59
60 #define DEFAULT_DEVICE "default"
61
62 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
63 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
64
65 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
66 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
67 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
68 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
69 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
70 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
71
72 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
73 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
74
75 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
76 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
77
78 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
79 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
80
81 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
82
83 struct userdata {
84     pa_core *core;
85     pa_module *module;
86     pa_source *source;
87
88     pa_thread *thread;
89     pa_thread_mq thread_mq;
90     pa_rtpoll *rtpoll;
91
92     snd_pcm_t *pcm_handle;
93
94     char *paths_dir;
95     pa_alsa_fdlist *mixer_fdl;
96     pa_alsa_mixer_pdata *mixer_pd;
97     snd_mixer_t *mixer_handle;
98     pa_alsa_path_set *mixer_path_set;
99     pa_alsa_path *mixer_path;
100
101     pa_cvolume hardware_volume;
102
103     unsigned int *rates;
104
105     size_t
106         frame_size,
107         fragment_size,
108         hwbuf_size,
109         tsched_watermark,
110         tsched_watermark_ref,
111         hwbuf_unused,
112         min_sleep,
113         min_wakeup,
114         watermark_inc_step,
115         watermark_dec_step,
116         watermark_inc_threshold,
117         watermark_dec_threshold;
118
119     pa_usec_t watermark_dec_not_before;
120     pa_usec_t min_latency_ref;
121
122     char *device_name;  /* name of the PCM device */
123     char *control_device; /* name of the control device */
124
125     pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
126
127     pa_bool_t first;
128
129     pa_rtpoll_item *alsa_rtpoll_item;
130
131     pa_smoother *smoother;
132     uint64_t read_count;
133     pa_usec_t smoother_interval;
134     pa_usec_t last_smoother_update;
135
136     pa_reserve_wrapper *reserve;
137     pa_hook_slot *reserve_slot;
138     pa_reserve_monitor_wrapper *monitor;
139     pa_hook_slot *monitor_slot;
140
141     /* ucm context */
142     pa_alsa_ucm_mapping_context *ucm_context;
143 };
144
145 static void userdata_free(struct userdata *u);
146
147 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
148     pa_assert(r);
149     pa_assert(u);
150
151     if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
152         return PA_HOOK_CANCEL;
153
154     return PA_HOOK_OK;
155 }
156
157 static void reserve_done(struct userdata *u) {
158     pa_assert(u);
159
160     if (u->reserve_slot) {
161         pa_hook_slot_free(u->reserve_slot);
162         u->reserve_slot = NULL;
163     }
164
165     if (u->reserve) {
166         pa_reserve_wrapper_unref(u->reserve);
167         u->reserve = NULL;
168     }
169 }
170
171 static void reserve_update(struct userdata *u) {
172     const char *description;
173     pa_assert(u);
174
175     if (!u->source || !u->reserve)
176         return;
177
178     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
179         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
180 }
181
182 static int reserve_init(struct userdata *u, const char *dname) {
183     char *rname;
184
185     pa_assert(u);
186     pa_assert(dname);
187
188     if (u->reserve)
189         return 0;
190
191     if (pa_in_system_mode())
192         return 0;
193
194     if (!(rname = pa_alsa_get_reserve_name(dname)))
195         return 0;
196
197     /* We are resuming, try to lock the device */
198     u->reserve = pa_reserve_wrapper_get(u->core, rname);
199     pa_xfree(rname);
200
201     if (!(u->reserve))
202         return -1;
203
204     reserve_update(u);
205
206     pa_assert(!u->reserve_slot);
207     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
208
209     return 0;
210 }
211
212 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
213     pa_bool_t b;
214
215     pa_assert(w);
216     pa_assert(u);
217
218     b = PA_PTR_TO_UINT(busy) && !u->reserve;
219
220     pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
221     return PA_HOOK_OK;
222 }
223
224 static void monitor_done(struct userdata *u) {
225     pa_assert(u);
226
227     if (u->monitor_slot) {
228         pa_hook_slot_free(u->monitor_slot);
229         u->monitor_slot = NULL;
230     }
231
232     if (u->monitor) {
233         pa_reserve_monitor_wrapper_unref(u->monitor);
234         u->monitor = NULL;
235     }
236 }
237
238 static int reserve_monitor_init(struct userdata *u, const char *dname) {
239     char *rname;
240
241     pa_assert(u);
242     pa_assert(dname);
243
244     if (pa_in_system_mode())
245         return 0;
246
247     if (!(rname = pa_alsa_get_reserve_name(dname)))
248         return 0;
249
250     /* We are resuming, try to lock the device */
251     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
252     pa_xfree(rname);
253
254     if (!(u->monitor))
255         return -1;
256
257     pa_assert(!u->monitor_slot);
258     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
259
260     return 0;
261 }
262
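/* Derive the minimal sleep and wakeup margins in bytes from the TSCHED_MIN_*
 * constants, clamped between one frame and half of the usable buffer. */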
263 static void fix_min_sleep_wakeup(struct userdata *u) {
264     size_t max_use, max_use_2;
265
266     pa_assert(u);
267     pa_assert(u->use_tsched);
268
269     max_use = u->hwbuf_size - u->hwbuf_unused;
270     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
271
272     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
273     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
274
275     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
276     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
277 }
278
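/* Clamp the timer-scheduling watermark so that it leaves at least min_sleep
 * of the usable buffer free and never falls below min_wakeup. */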
279 static void fix_tsched_watermark(struct userdata *u) {
280     size_t max_use;
281     pa_assert(u);
282     pa_assert(u->use_tsched);
283
284     max_use = u->hwbuf_size - u->hwbuf_unused;
285
286     if (u->tsched_watermark > max_use - u->min_sleep)
287         u->tsched_watermark = max_use - u->min_sleep;
288
289     if (u->tsched_watermark < u->min_wakeup)
290         u->tsched_watermark = u->min_wakeup;
291 }
292
293 static void increase_watermark(struct userdata *u) {
294     size_t old_watermark;
295     pa_usec_t old_min_latency, new_min_latency;
296
297     pa_assert(u);
298     pa_assert(u->use_tsched);
299
300     /* First, just try to increase the watermark */
301     old_watermark = u->tsched_watermark;
302     u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
303     fix_tsched_watermark(u);
304
305     if (old_watermark != u->tsched_watermark) {
306         pa_log_info("Increasing wakeup watermark to %0.2f ms",
307                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
308         return;
309     }
310
311     /* Hmm, we cannot increase the watermark any further, hence let's
312      raise the latency unless doing so was disabled in
313      configuration */
314     if (u->fixed_latency_range)
315         return;
316
317     old_min_latency = u->source->thread_info.min_latency;
318     new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
319     new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
320
321     if (old_min_latency != new_min_latency) {
322         pa_log_info("Increasing minimal latency to %0.2f ms",
323                     (double) new_min_latency / PA_USEC_PER_MSEC);
324
325         pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
326     }
327
328     /* When we reach this we're officially fucked! */
329 }
330
331 static void decrease_watermark(struct userdata *u) {
332     size_t old_watermark;
333     pa_usec_t now;
334
335     pa_assert(u);
336     pa_assert(u->use_tsched);
337
338     now = pa_rtclock_now();
339
340     if (u->watermark_dec_not_before <= 0)
341         goto restart;
342
343     if (u->watermark_dec_not_before > now)
344         return;
345
346     old_watermark = u->tsched_watermark;
347
348     if (u->tsched_watermark < u->watermark_dec_step)
349         u->tsched_watermark = u->tsched_watermark / 2;
350     else
351         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
352
353     fix_tsched_watermark(u);
354
355     if (old_watermark != u->tsched_watermark)
356         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
357                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
358
359     /* We don't change the latency range */
360
361 restart:
362     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
363 }
364
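/* Split the requested (or maximum) latency into the time we may sleep and the
 * time reserved for processing, based on the current watermark. */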
365 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
366     pa_usec_t wm, usec;
367
368     pa_assert(sleep_usec);
369     pa_assert(process_usec);
370
371     pa_assert(u);
372     pa_assert(u->use_tsched);
373
374     usec = pa_source_get_requested_latency_within_thread(u->source);
375
376     if (usec == (pa_usec_t) -1)
377         usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
378
379     wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
380
381     if (wm > usec)
382         wm = usec/2;
383
384     *sleep_usec = usec - wm;
385     *process_usec = wm;
386
387 #ifdef DEBUG_TIMING
388     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
389                  (unsigned long) (usec / PA_USEC_PER_MSEC),
390                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
391                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
392 #endif
393 }
394
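/* Log the error and try to recover the PCM from an overrun (-EPIPE) or a
 * system suspend (-ESTRPIPE); on success the capture stream is restarted. */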
395 static int try_recover(struct userdata *u, const char *call, int err) {
396     pa_assert(u);
397     pa_assert(call);
398     pa_assert(err < 0);
399
400     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
401
402     pa_assert(err != -EAGAIN);
403
404     if (err == -EPIPE)
405         pa_log_debug("%s: Buffer overrun!", call);
406
407     if (err == -ESTRPIPE)
408         pa_log_debug("%s: System suspended!", call);
409
410     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
411         pa_log("%s: %s", call, pa_alsa_strerror(err));
412         return -1;
413     }
414
415     u->first = TRUE;
416     return 0;
417 }
418
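/* Return how much space is left in the hardware buffer before an overrun
 * occurs, and adjust the watermark up or down accordingly. */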
419 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
420     size_t left_to_record;
421     size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
422     pa_bool_t overrun = FALSE;
423
424     /* We use <= instead of < for this check here because an overrun
425      * only happens after the last sample was processed, not as soon as
426      * it is removed from the buffer. This is particularly important
427      * when block transfer is used. */
428
429     if (n_bytes <= rec_space)
430         left_to_record = rec_space - n_bytes;
431     else {
432
433         /* We got a dropout. What a mess! */
434         left_to_record = 0;
435         overrun = TRUE;
436
437 #ifdef DEBUG_TIMING
438         PA_DEBUG_TRAP;
439 #endif
440
441         if (pa_log_ratelimit(PA_LOG_INFO))
442             pa_log_info("Overrun!");
443     }
444
445 #ifdef DEBUG_TIMING
446     pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
447 #endif
448
449     if (u->use_tsched) {
450         pa_bool_t reset_not_before = TRUE;
451
452         if (overrun || left_to_record < u->watermark_inc_threshold)
453             increase_watermark(u);
454         else if (left_to_record > u->watermark_dec_threshold) {
455             reset_not_before = FALSE;
456
457             /* We decrease the watermark only if we have actually
458              * been woken up by a timeout. If something else woke
459              * us up, it's too easy to fulfill the deadlines... */
460
461             if (on_timeout)
462                 decrease_watermark(u);
463         }
464
465         if (reset_not_before)
466             u->watermark_dec_not_before = 0;
467     }
468
469     return left_to_record;
470 }
471
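/* Read capture data through the mmap interface and post it to the source.
 * Returns 1 if data was read, 0 if not, negative on unrecoverable errors. */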
472 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
473     pa_bool_t work_done = FALSE;
474     pa_usec_t max_sleep_usec = 0, process_usec = 0;
475     size_t left_to_record;
476     unsigned j = 0;
477
478     pa_assert(u);
479     pa_source_assert_ref(u->source);
480
481     if (u->use_tsched)
482         hw_sleep_time(u, &max_sleep_usec, &process_usec);
483
484     for (;;) {
485         snd_pcm_sframes_t n;
486         size_t n_bytes;
487         int r;
488         pa_bool_t after_avail = TRUE;
489
490         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
491
492             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
493                 continue;
494
495             return r;
496         }
497
498         n_bytes = (size_t) n * u->frame_size;
499
500 #ifdef DEBUG_TIMING
501         pa_log_debug("avail: %lu", (unsigned long) n_bytes);
502 #endif
503
504         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
505         on_timeout = FALSE;
506
507         if (u->use_tsched)
508             if (!polled &&
509                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
510 #ifdef DEBUG_TIMING
511                 pa_log_debug("Not reading, because too early.");
512 #endif
513                 break;
514             }
515
516         if (PA_UNLIKELY(n_bytes <= 0)) {
517
518             if (polled)
519                 PA_ONCE_BEGIN {
520                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
521                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
522                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
523                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
524                            pa_strnull(dn));
525                     pa_xfree(dn);
526                 } PA_ONCE_END;
527
528 #ifdef DEBUG_TIMING
529             pa_log_debug("Not reading, because not necessary.");
530 #endif
531             break;
532         }
533
534
535         if (++j > 10) {
536 #ifdef DEBUG_TIMING
537             pa_log_debug("Not filling up, because already too many iterations.");
538 #endif
539
540             break;
541         }
542
543         polled = FALSE;
544
545 #ifdef DEBUG_TIMING
546         pa_log_debug("Reading");
547 #endif
548
549         for (;;) {
550             pa_memchunk chunk;
551             void *p;
552             int err;
553             const snd_pcm_channel_area_t *areas;
554             snd_pcm_uframes_t offset, frames;
555             snd_pcm_sframes_t sframes;
556
557             frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
558 /*             pa_log_debug("%lu frames to read", (unsigned long) frames); */
559
560             if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
561
562                 if (!after_avail && err == -EAGAIN)
563                     break;
564
565                 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
566                     continue;
567
568                 return r;
569             }
570
571             /* Make sure that if these memblocks need to be copied they will fit into one slot */
572             if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
573                 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
574
575             if (!after_avail && frames == 0)
576                 break;
577
578             pa_assert(frames > 0);
579             after_avail = FALSE;
580
581             /* Check these are multiples of 8 bit */
582             pa_assert((areas[0].first & 7) == 0);
583             pa_assert((areas[0].step & 7) == 0);
584
585             /* We assume a single interleaved memory buffer */
586             pa_assert((areas[0].first >> 3) == 0);
587             pa_assert((areas[0].step >> 3) == u->frame_size);
588
589             p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
590
591             chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
592             chunk.length = pa_memblock_get_length(chunk.memblock);
593             chunk.index = 0;
594
595             pa_source_post(u->source, &chunk);
596             pa_memblock_unref_fixed(chunk.memblock);
597
598             if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
599
600                 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
601                     continue;
602
603                 return r;
604             }
605
606             work_done = TRUE;
607
608             u->read_count += frames * u->frame_size;
609
610 #ifdef DEBUG_TIMING
611             pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
612 #endif
613
614             if ((size_t) frames * u->frame_size >= n_bytes)
615                 break;
616
617             n_bytes -= (size_t) frames * u->frame_size;
618         }
619     }
620
621     if (u->use_tsched) {
622         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
623         process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
624
625         if (*sleep_usec > process_usec)
626             *sleep_usec -= process_usec;
627         else
628             *sleep_usec = 0;
629     }
630
631     return work_done ? 1 : 0;
632 }
633
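/* Like mmap_read(), but reads with snd_pcm_readi() into newly allocated memblocks. */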
634 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
635     int work_done = FALSE;
636     pa_usec_t max_sleep_usec = 0, process_usec = 0;
637     size_t left_to_record;
638     unsigned j = 0;
639
640     pa_assert(u);
641     pa_source_assert_ref(u->source);
642
643     if (u->use_tsched)
644         hw_sleep_time(u, &max_sleep_usec, &process_usec);
645
646     for (;;) {
647         snd_pcm_sframes_t n;
648         size_t n_bytes;
649         int r;
650         pa_bool_t after_avail = TRUE;
651
652         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
653
654             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
655                 continue;
656
657             return r;
658         }
659
660         n_bytes = (size_t) n * u->frame_size;
661         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
662         on_timeout = FALSE;
663
664         if (u->use_tsched)
665             if (!polled &&
666                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
667                 break;
668
669         if (PA_UNLIKELY(n_bytes <= 0)) {
670
671             if (polled)
672                 PA_ONCE_BEGIN {
673                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
674                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
675                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
676                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
677                            pa_strnull(dn));
678                     pa_xfree(dn);
679                 } PA_ONCE_END;
680
681             break;
682         }
683
684         if (++j > 10) {
685 #ifdef DEBUG_TIMING
686             pa_log_debug("Not filling up, because already too many iterations.");
687 #endif
688
689             break;
690         }
691
692         polled = FALSE;
693
694         for (;;) {
695             void *p;
696             snd_pcm_sframes_t frames;
697             pa_memchunk chunk;
698
699             chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
700
701             frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
702
703             if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
704                 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
705
706 /*             pa_log_debug("%lu frames to read", (unsigned long) n); */
707
708             p = pa_memblock_acquire(chunk.memblock);
709             frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
710             pa_memblock_release(chunk.memblock);
711
712             if (PA_UNLIKELY(frames < 0)) {
713                 pa_memblock_unref(chunk.memblock);
714
715                 if (!after_avail && (int) frames == -EAGAIN)
716                     break;
717
718                 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
719                     continue;
720
721                 return r;
722             }
723
724             if (!after_avail && frames == 0) {
725                 pa_memblock_unref(chunk.memblock);
726                 break;
727             }
728
729             pa_assert(frames > 0);
730             after_avail = FALSE;
731
732             chunk.index = 0;
733             chunk.length = (size_t) frames * u->frame_size;
734
735             pa_source_post(u->source, &chunk);
736             pa_memblock_unref(chunk.memblock);
737
738             work_done = TRUE;
739
740             u->read_count += frames * u->frame_size;
741
742 /*             pa_log_debug("read %lu frames", (unsigned long) frames); */
743
744             if ((size_t) frames * u->frame_size >= n_bytes)
745                 break;
746
747             n_bytes -= (size_t) frames * u->frame_size;
748         }
749     }
750
751     if (u->use_tsched) {
752         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
753         process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
754
755         if (*sleep_usec > process_usec)
756             *sleep_usec -= process_usec;
757         else
758             *sleep_usec = 0;
759     }
760
761     return work_done ? 1 : 0;
762 }
763
764 static void update_smoother(struct userdata *u) {
765     snd_pcm_sframes_t delay = 0;
766     uint64_t position;
767     int err;
768     pa_usec_t now1 = 0, now2;
769     snd_pcm_status_t *status;
770
771     snd_pcm_status_alloca(&status);
772
773     pa_assert(u);
774     pa_assert(u->pcm_handle);
775
776     /* Let's update the time smoother */
777
778     if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
779         pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
780         return;
781     }
782
783     if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
784         pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
785     else {
786         snd_htimestamp_t htstamp = { 0, 0 };
787         snd_pcm_status_get_htstamp(status, &htstamp);
788         now1 = pa_timespec_load(&htstamp);
789     }
790
791     /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
792     if (now1 <= 0)
793         now1 = pa_rtclock_now();
794
795     /* check if the time since the last update is bigger than the interval */
796     if (u->last_smoother_update > 0)
797         if (u->last_smoother_update + u->smoother_interval > now1)
798             return;
799
800     position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
801     now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
802
803     pa_smoother_put(u->smoother, now1, now2);
804
805     u->last_smoother_update = now1;
806     /* exponentially increase the update interval up to the MAX limit */
807     u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
808 }
809
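/* Estimate the capture latency as the difference between the smoother's
 * interpolated capture time and the amount of data already read. */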
810 static pa_usec_t source_get_latency(struct userdata *u) {
811     int64_t delay;
812     pa_usec_t now1, now2;
813
814     pa_assert(u);
815
816     now1 = pa_rtclock_now();
817     now2 = pa_smoother_get(u->smoother, now1);
818
819     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
820
821     return delay >= 0 ? (pa_usec_t) delay : 0;
822 }
823
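/* (Re)create the rtpoll item wrapping the PCM device's poll descriptors. */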
824 static int build_pollfd(struct userdata *u) {
825     pa_assert(u);
826     pa_assert(u->pcm_handle);
827
828     if (u->alsa_rtpoll_item)
829         pa_rtpoll_item_free(u->alsa_rtpoll_item);
830
831     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
832         return -1;
833
834     return 0;
835 }
836
837 /* Called from IO context */
838 static int suspend(struct userdata *u) {
839     pa_assert(u);
840     pa_assert(u->pcm_handle);
841
842     pa_smoother_pause(u->smoother, pa_rtclock_now());
843
844     /* Let's suspend */
845     snd_pcm_close(u->pcm_handle);
846     u->pcm_handle = NULL;
847
848     if (u->alsa_rtpoll_item) {
849         pa_rtpoll_item_free(u->alsa_rtpoll_item);
850         u->alsa_rtpoll_item = NULL;
851     }
852
853     pa_log_info("Device suspended...");
854
855     return 0;
856 }
857
858 /* Called from IO context */
859 static int update_sw_params(struct userdata *u) {
860     snd_pcm_uframes_t avail_min;
861     int err;
862
863     pa_assert(u);
864
865     /* Use the full buffer if no one asked us for anything specific */
866     u->hwbuf_unused = 0;
867
868     if (u->use_tsched) {
869         pa_usec_t latency;
870
871         if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
872             size_t b;
873
874             pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
875
876             b = pa_usec_to_bytes(latency, &u->source->sample_spec);
877
878             /* We need at least one sample in our buffer */
879
880             if (PA_UNLIKELY(b < u->frame_size))
881                 b = u->frame_size;
882
883             u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
884         }
885
886         fix_min_sleep_wakeup(u);
887         fix_tsched_watermark(u);
888     }
889
890     pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
891
892     avail_min = 1;
893
894     if (u->use_tsched) {
895         pa_usec_t sleep_usec, process_usec;
896
897         hw_sleep_time(u, &sleep_usec, &process_usec);
898         avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
899     }
900
901     pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
902
903     if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
904         pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
905         return err;
906     }
907
908     return 0;
909 }
910
911 /* Called from IO Context on unsuspend or from main thread when creating source */
912 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
913                             pa_bool_t in_thread)
914 {
915     u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
916                                                     &u->source->sample_spec);
917
918     u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
919     u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
920
921     u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
922     u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
923
924     fix_min_sleep_wakeup(u);
925     fix_tsched_watermark(u);
926
927     if (in_thread)
928         pa_source_set_latency_range_within_thread(u->source,
929                                                   u->min_latency_ref,
930                                                   pa_bytes_to_usec(u->hwbuf_size, ss));
931     else {
932         pa_source_set_latency_range(u->source,
933                                     0,
934                                     pa_bytes_to_usec(u->hwbuf_size, ss));
935
936         /* Work around the assert in pa_source_set_latency_range_within_thread():
937            keep track of min_latency and reuse it when
938            this routine is called from the IO context */
939         u->min_latency_ref = u->source->thread_info.min_latency;
940     }
941
942     pa_log_info("Time scheduling watermark is %0.2fms",
943                 (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
944 }
945
946 /* Called from IO context */
947 static int unsuspend(struct userdata *u) {
948     pa_sample_spec ss;
949     int err;
950     pa_bool_t b, d;
951     snd_pcm_uframes_t period_size, buffer_size;
952
953     pa_assert(u);
954     pa_assert(!u->pcm_handle);
955
956     pa_log_info("Trying resume...");
957
958     if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
959                             SND_PCM_NONBLOCK|
960                             SND_PCM_NO_AUTO_RESAMPLE|
961                             SND_PCM_NO_AUTO_CHANNELS|
962                             SND_PCM_NO_AUTO_FORMAT)) < 0) {
963         pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
964         goto fail;
965     }
966
967     ss = u->source->sample_spec;
968     period_size = u->fragment_size / u->frame_size;
969     buffer_size = u->hwbuf_size / u->frame_size;
970     b = u->use_mmap;
971     d = u->use_tsched;
972
973     if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
974         pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
975         goto fail;
976     }
977
978     if (b != u->use_mmap || d != u->use_tsched) {
979         pa_log_warn("Resume failed, couldn't get original access mode.");
980         goto fail;
981     }
982
983     if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
984         pa_log_warn("Resume failed, couldn't restore original sample settings.");
985         goto fail;
986     }
987
988     if (period_size*u->frame_size != u->fragment_size ||
989         buffer_size*u->frame_size != u->hwbuf_size) {
990         pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
991                     (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
992                     (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
993         goto fail;
994     }
995
996     if (update_sw_params(u) < 0)
997         goto fail;
998
999     if (build_pollfd(u) < 0)
1000         goto fail;
1001
1002     /* FIXME: We need to reload the volume somehow */
1003
1004     u->read_count = 0;
1005     pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1006     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1007     u->last_smoother_update = 0;
1008
1009     u->first = TRUE;
1010
1011     /* reset the watermark to the value defined when source was created */
1012     if (u->use_tsched)
1013         reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, TRUE);
1014
1015     pa_log_info("Resumed successfully...");
1016
1017     return 0;
1018
1019 fail:
1020     if (u->pcm_handle) {
1021         snd_pcm_close(u->pcm_handle);
1022         u->pcm_handle = NULL;
1023     }
1024
1025     return -PA_ERR_IO;
1026 }
1027
1028 /* Called from IO context */
1029 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1030     struct userdata *u = PA_SOURCE(o)->userdata;
1031
1032     switch (code) {
1033
1034         case PA_SOURCE_MESSAGE_GET_LATENCY: {
1035             pa_usec_t r = 0;
1036
1037             if (u->pcm_handle)
1038                 r = source_get_latency(u);
1039
1040             *((pa_usec_t*) data) = r;
1041
1042             return 0;
1043         }
1044
1045         case PA_SOURCE_MESSAGE_SET_STATE:
1046
1047             switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
1048
1049                 case PA_SOURCE_SUSPENDED: {
1050                     int r;
1051
1052                     pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1053
1054                     if ((r = suspend(u)) < 0)
1055                         return r;
1056
1057                     break;
1058                 }
1059
1060                 case PA_SOURCE_IDLE:
1061                 case PA_SOURCE_RUNNING: {
1062                     int r;
1063
1064                     if (u->source->thread_info.state == PA_SOURCE_INIT) {
1065                         if (build_pollfd(u) < 0)
1066                             return -PA_ERR_IO;
1067                     }
1068
1069                     if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1070                         if ((r = unsuspend(u)) < 0)
1071                             return r;
1072                     }
1073
1074                     break;
1075                 }
1076
1077                 case PA_SOURCE_UNLINKED:
1078                 case PA_SOURCE_INIT:
1079                 case PA_SOURCE_INVALID_STATE:
1080                     ;
1081             }
1082
1083             break;
1084     }
1085
1086     return pa_source_process_msg(o, code, data, offset, chunk);
1087 }
1088
1089 /* Called from main context */
1090 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1091     pa_source_state_t old_state;
1092     struct userdata *u;
1093
1094     pa_source_assert_ref(s);
1095     pa_assert_se(u = s->userdata);
1096
1097     old_state = pa_source_get_state(u->source);
1098
1099     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1100         reserve_done(u);
1101     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1102         if (reserve_init(u, u->device_name) < 0)
1103             return -PA_ERR_BUSY;
1104
1105     return 0;
1106 }
1107
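/* Mixer event callback run from the main thread; re-reads volume and mute
 * when the hardware changes them. */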
1108 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1109     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1110
1111     pa_assert(u);
1112     pa_assert(u->mixer_handle);
1113
1114     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1115         return 0;
1116
1117     if (!PA_SOURCE_IS_LINKED(u->source->state))
1118         return 0;
1119
1120     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1121         pa_source_set_mixer_dirty(u->source, TRUE);
1122         return 0;
1123     }
1124
1125     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1126         pa_source_get_volume(u->source, TRUE);
1127         pa_source_get_mute(u->source, TRUE);
1128     }
1129
1130     return 0;
1131 }
1132
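/* Mixer event callback run from the IO thread when deferred volume is used. */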
1133 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1134     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1135
1136     pa_assert(u);
1137     pa_assert(u->mixer_handle);
1138
1139     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1140         return 0;
1141
1142     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1143         pa_source_set_mixer_dirty(u->source, TRUE);
1144         return 0;
1145     }
1146
1147     if (mask & SND_CTL_EVENT_MASK_VALUE)
1148         pa_source_update_volume_and_mute(u->source);
1149
1150     return 0;
1151 }
1152
1153 static void source_get_volume_cb(pa_source *s) {
1154     struct userdata *u = s->userdata;
1155     pa_cvolume r;
1156     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1157
1158     pa_assert(u);
1159     pa_assert(u->mixer_path);
1160     pa_assert(u->mixer_handle);
1161
1162     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1163         return;
1164
1165     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1166     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1167
1168     pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1169
1170     if (u->mixer_path->has_dB) {
1171         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1172
1173         pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1174     }
1175
1176     if (pa_cvolume_equal(&u->hardware_volume, &r))
1177         return;
1178
1179     s->real_volume = u->hardware_volume = r;
1180
1181     /* Hmm, so the hardware volume changed, let's reset our software volume */
1182     if (u->mixer_path->has_dB)
1183         pa_source_set_soft_volume(s, NULL);
1184 }
1185
1186 static void source_set_volume_cb(pa_source *s) {
1187     struct userdata *u = s->userdata;
1188     pa_cvolume r;
1189     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1190     pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1191
1192     pa_assert(u);
1193     pa_assert(u->mixer_path);
1194     pa_assert(u->mixer_handle);
1195
1196     /* Shift up by the base volume */
1197     pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1198
1199     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1200         return;
1201
1202     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1203     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1204
1205     u->hardware_volume = r;
1206
1207     if (u->mixer_path->has_dB) {
1208         pa_cvolume new_soft_volume;
1209         pa_bool_t accurate_enough;
1210         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1211
1212         /* Match exactly what the user requested by software */
1213         pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1214
1215         /* If the adjustment to do in software is only minimal we
1216          * can skip it. That saves us CPU at the expense of a bit of
1217          * accuracy */
1218         accurate_enough =
1219             (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1220             (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1221
1222         pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1223         pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1224         pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1225         pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1226         pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1227                      pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1228                      pa_yes_no(accurate_enough));
1229         pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1230
1231         if (!accurate_enough)
1232             s->soft_volume = new_soft_volume;
1233
1234     } else {
1235         pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1236
1237         /* We can't match exactly what the user requested, hence let's
1238          * at least tell the user about it */
1239
1240         s->real_volume = r;
1241     }
1242 }
1243
1244 static void source_write_volume_cb(pa_source *s) {
1245     struct userdata *u = s->userdata;
1246     pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1247
1248     pa_assert(u);
1249     pa_assert(u->mixer_path);
1250     pa_assert(u->mixer_handle);
1251     pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1252
1253     /* Shift up by the base volume */
1254     pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1255
1256     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1257         pa_log_error("Writing HW volume failed");
1258     else {
1259         pa_cvolume tmp_vol;
1260         pa_bool_t accurate_enough;
1261
1262         /* Shift down by the base volume, so that 0dB becomes maximum volume */
1263         pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1264
1265         pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1266         accurate_enough =
1267             (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1268             (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1269
1270         if (!accurate_enough) {
1271             union {
1272                 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1273                 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1274             } vol;
1275
1276             pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1277                          pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1278                          pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1279             pa_log_debug("                                           in dB: %s (request) != %s",
1280                          pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1281                          pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1282         }
1283     }
1284 }
1285
1286 static void source_get_mute_cb(pa_source *s) {
1287     struct userdata *u = s->userdata;
1288     pa_bool_t b;
1289
1290     pa_assert(u);
1291     pa_assert(u->mixer_path);
1292     pa_assert(u->mixer_handle);
1293
1294     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1295         return;
1296
1297     s->muted = b;
1298 }
1299
1300 static void source_set_mute_cb(pa_source *s) {
1301     struct userdata *u = s->userdata;
1302
1303     pa_assert(u);
1304     pa_assert(u->mixer_path);
1305     pa_assert(u->mixer_handle);
1306
1307     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1308 }
1309
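/* Set up hardware volume and mute callbacks according to what the probed
 * mixer path supports, falling back to software control otherwise. */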
1310 static void mixer_volume_init(struct userdata *u) {
1311     pa_assert(u);
1312
1313     if (!u->mixer_path->has_volume) {
1314         pa_source_set_write_volume_callback(u->source, NULL);
1315         pa_source_set_get_volume_callback(u->source, NULL);
1316         pa_source_set_set_volume_callback(u->source, NULL);
1317
1318         pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1319     } else {
1320         pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1321         pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1322
1323         if (u->mixer_path->has_dB && u->deferred_volume) {
1324             pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1325             pa_log_info("Successfully enabled deferred volume.");
1326         } else
1327             pa_source_set_write_volume_callback(u->source, NULL);
1328
1329         if (u->mixer_path->has_dB) {
1330             pa_source_enable_decibel_volume(u->source, TRUE);
1331             pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1332
1333             u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1334             u->source->n_volume_steps = PA_VOLUME_NORM+1;
1335
1336             pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1337         } else {
1338             pa_source_enable_decibel_volume(u->source, FALSE);
1339             pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1340
1341             u->source->base_volume = PA_VOLUME_NORM;
1342             u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1343         }
1344
1345         pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1346     }
1347
1348     if (!u->mixer_path->has_mute) {
1349         pa_source_set_get_mute_callback(u->source, NULL);
1350         pa_source_set_set_mute_callback(u->source, NULL);
1351         pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1352     } else {
1353         pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1354         pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1355         pa_log_info("Using hardware mute control.");
1356     }
1357 }
1358
1359 static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
1360     struct userdata *u = s->userdata;
1361
1362     pa_assert(u);
1363     pa_assert(p);
1364     pa_assert(u->ucm_context);
1365
1366     return pa_alsa_ucm_set_port(u->ucm_context, p, FALSE);
1367 }
1368
1369 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1370     struct userdata *u = s->userdata;
1371     pa_alsa_port_data *data;
1372
1373     pa_assert(u);
1374     pa_assert(p);
1375     pa_assert(u->mixer_handle);
1376
1377     data = PA_DEVICE_PORT_DATA(p);
1378
1379     pa_assert_se(u->mixer_path = data->path);
1380     pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1381
1382     mixer_volume_init(u);
1383
1384     if (s->set_mute)
1385         s->set_mute(s);
1386     if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1387         if (s->write_volume)
1388             s->write_volume(s);
1389     } else {
1390         if (s->set_volume)
1391             s->set_volume(s);
1392     }
1393
1394     return 0;
1395 }
1396
1397 static void source_update_requested_latency_cb(pa_source *s) {
1398     struct userdata *u = s->userdata;
1399     pa_assert(u);
1400     pa_assert(u->use_tsched); /* only when timer scheduling is used
1401                                * can we dynamically adjust the
1402                                * latency */
1403
1404     if (!u->pcm_handle)
1405         return;
1406
1407     update_sw_params(u);
1408 }
1409
1410 static pa_bool_t source_update_rate_cb(pa_source *s, uint32_t rate)
1411 {
1412     struct userdata *u = s->userdata;
1413     int i;
1414     pa_bool_t supported = FALSE;
1415
1416     pa_assert(u);
1417
1418     for (i = 0; u->rates[i]; i++) {
1419         if (u->rates[i] == rate) {
1420             supported = TRUE;
1421             break;
1422         }
1423     }
1424
1425     if (!supported) {
1426         pa_log_info("Sink does not support sample rate of %d Hz", rate);
1427         return FALSE;
1428     }
1429
1430     if (!PA_SOURCE_IS_OPENED(s->state)) {
1431         pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1432         u->source->sample_spec.rate = rate;
1433         return TRUE;
1434     }
1435
1436     return FALSE;
1437 }
1438
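/* The source's IO thread: reads from the PCM device, updates the smoother and
 * sleeps until the next estimated wakeup or until a message arrives. */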
1439 static void thread_func(void *userdata) {
1440     struct userdata *u = userdata;
1441     unsigned short revents = 0;
1442
1443     pa_assert(u);
1444
1445     pa_log_debug("Thread starting up");
1446
1447     if (u->core->realtime_scheduling)
1448         pa_make_realtime(u->core->realtime_priority);
1449
1450     pa_thread_mq_install(&u->thread_mq);
1451
1452     for (;;) {
1453         int ret;
1454         pa_usec_t rtpoll_sleep = 0, real_sleep;
1455
1456 #ifdef DEBUG_TIMING
1457         pa_log_debug("Loop");
1458 #endif
1459
1460         /* Read some data and pass it to the sources */
1461         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1462             int work_done;
1463             pa_usec_t sleep_usec = 0;
1464             pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1465
1466             if (u->first) {
1467                 pa_log_info("Starting capture.");
1468                 snd_pcm_start(u->pcm_handle);
1469
1470                 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1471
1472                 u->first = FALSE;
1473             }
1474
1475             if (u->use_mmap)
1476                 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1477             else
1478                 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1479
1480             if (work_done < 0)
1481                 goto fail;
1482
1483 /*             pa_log_debug("work_done = %i", work_done); */
1484
1485             if (work_done)
1486                 update_smoother(u);
1487
1488             if (u->use_tsched) {
1489                 pa_usec_t cusec;
1490
1491                 /* OK, the capture buffer is now empty, let's
1492                  * calculate when to wake up next */
1493
1494 /*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1495
1496                 /* Convert from the sound card time domain to the
1497                  * system time domain */
1498                 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1499
1500 /*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1501
1502                 /* We don't trust the conversion, so we wake up at whichever comes first */
1503                 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1504             }
1505         }
1506
1507         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1508             pa_usec_t volume_sleep;
1509             pa_source_volume_change_apply(u->source, &volume_sleep);
1510             if (volume_sleep > 0) {
1511                 if (rtpoll_sleep > 0)
1512                     rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1513                 else
1514                     rtpoll_sleep = volume_sleep;
1515             }
1516         }
1517
1518         if (rtpoll_sleep > 0) {
1519             pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1520             real_sleep = pa_rtclock_now();
1521         }
1522         else
1523             pa_rtpoll_set_timer_disabled(u->rtpoll);
1524
1525         /* Hmm, nothing to do. Let's sleep */
1526         if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1527             goto fail;
1528
1529         if (rtpoll_sleep > 0) {
1530             real_sleep = pa_rtclock_now() - real_sleep;
1531 #ifdef DEBUG_TIMING
1532             pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
1533                 (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
1534                 (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
1535 #endif
1536             if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark)
1537                 pa_log_info("Scheduling delay of %0.2fms, you might want to investigate this to improve latency...",
1538                     (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC);
1539         }
1540
1541         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
1542             pa_source_volume_change_apply(u->source, NULL);
1543
1544         if (ret == 0)
1545             goto finish;
1546
1547         /* Tell ALSA about this and process its response */
1548         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1549             struct pollfd *pollfd;
1550             int err;
1551             unsigned n;
1552
1553             pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1554
1555             if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1556                 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1557                 goto fail;
1558             }
1559
1560             if (revents & ~POLLIN) {
1561                 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1562                     goto fail;
1563
1564                 u->first = TRUE;
1565                 revents = 0;
1566             } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1567                 pa_log_debug("Wakeup from ALSA!");
1568
1569         } else
1570             revents = 0;
1571     }
1572
1573 fail:
1574     /* If this was not a regular exit from the loop we have to continue
1575      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1576     pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1577     pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1578
1579 finish:
1580     pa_log_debug("Thread shutting down");
1581 }
1582
1583 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1584     const char *n;
1585     char *t;
1586
1587     pa_assert(data);
1588     pa_assert(ma);
1589     pa_assert(device_name);
1590
1591     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1592         pa_source_new_data_set_name(data, n);
1593         data->namereg_fail = TRUE;
1594         return;
1595     }
1596
1597     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1598         data->namereg_fail = TRUE;
1599     else {
1600         n = device_id ? device_id : device_name;
1601         data->namereg_fail = FALSE;
1602     }
1603
1604     if (mapping)
1605         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1606     else
1607         t = pa_sprintf_malloc("alsa_input.%s", n);
1608
1609     pa_source_new_data_set_name(data, t);
1610     pa_xfree(t);
1611 }
1612
1613 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1614     snd_hctl_t *hctl;
1615
1616     if (!mapping && !element)
1617         return;
1618
1619     if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1620         pa_log_info("Failed to find a working mixer device.");
1621         return;
1622     }
1623
1624     if (element) {
1625
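             /* An explicit mixer element was requested: synthesize a path for just that element and probe whether it actually works */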
1626         if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1627             goto fail;
1628
1629         if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1630             goto fail;
1631
1632         pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1633         pa_alsa_path_dump(u->mixer_path);
1634     } else if (!(u->mixer_path_set = mapping->input_path_set))
1635         goto fail;
1636
1637     return;
1638
1639 fail:
1640
1641     if (u->mixer_path) {
1642         pa_alsa_path_free(u->mixer_path);
1643         u->mixer_path = NULL;
1644     }
1645
1646     if (u->mixer_handle) {
1647         snd_mixer_close(u->mixer_handle);
1648         u->mixer_handle = NULL;
1649     }
1650 }
1651
1652 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1653     pa_bool_t need_mixer_callback = FALSE;
1654
1655     pa_assert(u);
1656
1657     if (!u->mixer_handle)
1658         return 0;
1659
1660     if (u->source->active_port) {
1661         pa_alsa_port_data *data;
1662
1663         /* We have a list of supported paths, so let's activate the
1664          * one that has been chosen as active */
1665
1666         data = PA_DEVICE_PORT_DATA(u->source->active_port);
1667         u->mixer_path = data->path;
1668
1669         pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);
1670
1671     } else {
1672
1673         if (!u->mixer_path && u->mixer_path_set)
1674             u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1675
1676         if (u->mixer_path) {
1677             /* We have a path (either explicitly requested or the first one from the path set), so let's activate it */
1678
1679             pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
1680         } else
1681             return 0;
1682     }
1683
1684     mixer_volume_init(u);
1685
1686     /* Will we need to register callbacks? */
1687     if (u->mixer_path_set && u->mixer_path_set->paths) {
1688         pa_alsa_path *p;
1689         void *state;
1690
1691         PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1692             if (p->has_volume || p->has_mute)
1693                 need_mixer_callback = TRUE;
1694         }
1695     }
1696     else if (u->mixer_path)
1697         need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1698
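         /* With deferred volume the mixer events are handled from the IO thread via rtpoll, otherwise from the main loop via an fd list */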
1699     if (need_mixer_callback) {
1700         int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1701         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1702             u->mixer_pd = pa_alsa_mixer_pdata_new();
1703             mixer_callback = io_mixer_callback;
1704
1705             if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1706                 pa_log("Failed to initialize file descriptor monitoring");
1707                 return -1;
1708             }
1709         } else {
1710             u->mixer_fdl = pa_alsa_fdlist_new();
1711             mixer_callback = ctl_mixer_callback;
1712
1713             if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1714                 pa_log("Failed to initialize file descriptor monitoring");
1715                 return -1;
1716             }
1717         }
1718
1719         if (u->mixer_path_set)
1720             pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1721         else
1722             pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1723     }
1724
1725     return 0;
1726 }
1727
1728 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1729
1730     struct userdata *u = NULL;
1731     const char *dev_id = NULL, *key, *mod_name;
1732     pa_sample_spec ss;
1733     uint32_t alternate_sample_rate;
1734     pa_channel_map map;
1735     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1736     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1737     size_t frame_size;
1738     pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, fixed_latency_range = FALSE;
1739     pa_source_new_data data;
1740     pa_alsa_profile_set *profile_set = NULL;
1741     void *state = NULL;
1742
1743     pa_assert(m);
1744     pa_assert(ma);
1745
1746     ss = m->core->default_sample_spec;
1747     map = m->core->default_channel_map;
1748     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1749         pa_log("Failed to parse sample specification and channel map");
1750         goto fail;
1751     }
1752
1753     alternate_sample_rate = m->core->alternate_sample_rate;
1754     if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1755         pa_log("Failed to parse alternate sample rate");
1756         goto fail;
1757     }
1758
1759     frame_size = pa_frame_size(&ss);
1760
1761     nfrags = m->core->default_n_fragments;
1762     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1763     if (frag_size <= 0)
1764         frag_size = (uint32_t) frame_size;
1765     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1766     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1767
1768     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1769         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1770         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1771         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1772         pa_log("Failed to parse buffer metrics");
1773         goto fail;
1774     }
1775
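         /* Derive the overall buffer size and convert the byte values into ALSA frame counts */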
1776     buffer_size = nfrags * frag_size;
1777
1778     period_frames = frag_size/frame_size;
1779     buffer_frames = buffer_size/frame_size;
1780     tsched_frames = tsched_size/frame_size;
1781
1782     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1783         pa_log("Failed to parse mmap argument.");
1784         goto fail;
1785     }
1786
1787     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1788         pa_log("Failed to parse tsched argument.");
1789         goto fail;
1790     }
1791
1792     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1793         pa_log("Failed to parse ignore_dB argument.");
1794         goto fail;
1795     }
1796
1797     deferred_volume = m->core->deferred_volume;
1798     if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1799         pa_log("Failed to parse deferred_volume argument.");
1800         goto fail;
1801     }
1802
1803     if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
1804         pa_log("Failed to parse fixed_latency_range argument.");
1805         goto fail;
1806     }
1807
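         /* Timer-based scheduling requires high-resolution timers; fall back if the kernel doesn't provide them */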
1808     use_tsched = pa_alsa_may_tsched(use_tsched);
1809
1810     u = pa_xnew0(struct userdata, 1);
1811     u->core = m->core;
1812     u->module = m;
1813     u->use_mmap = use_mmap;
1814     u->use_tsched = use_tsched;
1815     u->deferred_volume = deferred_volume;
1816     u->fixed_latency_range = fixed_latency_range;
1817     u->first = TRUE;
1818     u->rtpoll = pa_rtpoll_new();
1819     pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1820
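         /* Set up the time smoother that turns jittery ALSA timing data into a steady latency estimate */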
1821     u->smoother = pa_smoother_new(
1822             SMOOTHER_ADJUST_USEC,
1823             SMOOTHER_WINDOW_USEC,
1824             TRUE,
1825             TRUE,
1826             5,
1827             pa_rtclock_now(),
1828             TRUE);
1829     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1830
1831     /* Use the UCM context from the mapping, if it provides one */
1832     if (mapping && mapping->ucm_context.ucm)
1833         u->ucm_context = &mapping->ucm_context;
1834
1835     dev_id = pa_modargs_get_value(
1836             ma, "device_id",
1837             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1838
1839     u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
1840
1841     if (reserve_init(u, dev_id) < 0)
1842         goto fail;
1843
1844     if (reserve_monitor_init(u, dev_id) < 0)
1845         goto fail;
1846
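         /* b and d are passed to the open helpers by reference; they get reset if the device turns out not to support mmap or timer-based scheduling */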
1847     b = use_mmap;
1848     d = use_tsched;
1849
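         /* Open the PCM device: prefer the card mapping if we have one, otherwise probe by device_id, and as a last resort use the plain device string */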
1850     if (mapping) {
1851
1852         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1853             pa_log("device_id= not set");
1854             goto fail;
1855         }
1856
1857         if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
1858             if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
1859                 pa_log("Failed to enable ucm modifier %s", mod_name);
1860             else
1861                 pa_log_debug("Enabled ucm modifier %s", mod_name);
1862         }
1863
1864         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1865                       dev_id,
1866                       &u->device_name,
1867                       &ss, &map,
1868                       SND_PCM_STREAM_CAPTURE,
1869                       &period_frames, &buffer_frames, tsched_frames,
1870                       &b, &d, mapping)))
1871             goto fail;
1872
1873     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1874
1875         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1876             goto fail;
1877
1878         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1879                       dev_id,
1880                       &u->device_name,
1881                       &ss, &map,
1882                       SND_PCM_STREAM_CAPTURE,
1883                       &period_frames, &buffer_frames, tsched_frames,
1884                       &b, &d, profile_set, &mapping)))
1885             goto fail;
1886
1887     } else {
1888
1889         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1890                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1891                       &u->device_name,
1892                       &ss, &map,
1893                       SND_PCM_STREAM_CAPTURE,
1894                       &period_frames, &buffer_frames, tsched_frames,
1895                       &b, &d, FALSE)))
1896             goto fail;
1897     }
1898
1899     pa_assert(u->device_name);
1900     pa_log_info("Successfully opened device %s.", u->device_name);
1901
1902     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1903         pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
1904         goto fail;
1905     }
1906
1907     if (mapping)
1908         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1909
1910     if (use_mmap && !b) {
1911         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1912         u->use_mmap = use_mmap = FALSE;
1913     }
1914
1915     if (use_tsched && (!b || !d)) {
1916         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1917         u->use_tsched = use_tsched = FALSE;
1918     }
1919
1920     if (u->use_mmap)
1921         pa_log_info("Successfully enabled mmap() mode.");
1922
1923     if (u->use_tsched) {
1924         pa_log_info("Successfully enabled timer-based scheduling mode.");
1925         if (u->fixed_latency_range)
1926             pa_log_info("Disabling latency range changes on overrun");
1927     }
1928
1929     u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
1930     if (!u->rates) {
1931         pa_log_error("Failed to find any supported sample rates.");
1932         goto fail;
1933     }
1934
1935     /* ALSA might tweak the sample spec, so recalculate the frame size */
1936     frame_size = pa_frame_size(&ss);
1937
1938     if (!u->ucm_context)
1939         find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1940
1941     pa_source_new_data_init(&data);
1942     data.driver = driver;
1943     data.module = m;
1944     data.card = card;
1945     set_source_name(&data, ma, dev_id, u->device_name, mapping);
1946
1947     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1948      * variable instead of using &data.namereg_fail directly, because
1949      * data.namereg_fail is a bitfield and taking the address of a bitfield
1950      * variable is impossible. */
1951     namereg_fail = data.namereg_fail;
1952     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1953         pa_log("Failed to parse namereg_fail argument.");
1954         pa_source_new_data_done(&data);
1955         goto fail;
1956     }
1957     data.namereg_fail = namereg_fail;
1958
1959     pa_source_new_data_set_sample_spec(&data, &ss);
1960     pa_source_new_data_set_channel_map(&data, &map);
1961     pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
1962
1963     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1964     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1965     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1966     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1967     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1968
1969     if (mapping) {
1970         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1971         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1972
1973         while ((key = pa_proplist_iterate(mapping->proplist, &state)))
1974             pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
1975     }
1976
1977     pa_alsa_init_description(data.proplist);
1978
1979     if (u->control_device)
1980         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1981
1982     if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1983         pa_log("Invalid properties");
1984         pa_source_new_data_done(&data);
1985         goto fail;
1986     }
1987
1988     if (u->ucm_context)
1989         pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, FALSE, card);
1990     else if (u->mixer_path_set)
1991         pa_alsa_add_ports(&data, u->mixer_path_set, card);
1992
1993     u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1994     pa_source_new_data_done(&data);
1995
1996     if (!u->source) {
1997         pa_log("Failed to create source object");
1998         goto fail;
1999     }
2000
2001     if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2002                                  &u->source->thread_info.volume_change_safety_margin) < 0) {
2003         pa_log("Failed to parse deferred_volume_safety_margin parameter");
2004         goto fail;
2005     }
2006
2007     if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2008                                  &u->source->thread_info.volume_change_extra_delay) < 0) {
2009         pa_log("Failed to parse deferred_volume_extra_delay parameter");
2010         goto fail;
2011     }
2012
2013     u->source->parent.process_msg = source_process_msg;
2014     if (u->use_tsched)
2015         u->source->update_requested_latency = source_update_requested_latency_cb;
2016     u->source->set_state = source_set_state_cb;
2017     if (u->ucm_context)
2018         u->source->set_port = source_set_port_ucm_cb;
2019     else
2020         u->source->set_port = source_set_port_cb;
2021     if (u->source->alternate_sample_rate)
2022         u->source->update_rate = source_update_rate_cb;
2023     u->source->userdata = u;
2024
2025     pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
2026     pa_source_set_rtpoll(u->source, u->rtpoll);
2027
2028     u->frame_size = frame_size;
2029     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2030     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2031     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
2032
2033     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2034                 (double) u->hwbuf_size / (double) u->fragment_size,
2035                 (long unsigned) u->fragment_size,
2036                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2037                 (long unsigned) u->hwbuf_size,
2038                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2039
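         /* With timer-based scheduling the watermark determines the (dynamic) latency; without it the latency is fixed to the full hardware buffer */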
2040     if (u->use_tsched) {
2041         u->tsched_watermark_ref = tsched_watermark;
2042         reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2043     }
2044     else
2045         pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
2046
2047     reserve_update(u);
2048
2049     if (update_sw_params(u) < 0)
2050         goto fail;
2051
2052     if (u->ucm_context) {
2053         if (u->source->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, FALSE) < 0)
2054             goto fail;
2055     } else if (setup_mixer(u, ignore_dB) < 0)
2056         goto fail;
2057
2058     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2059
2060     if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
2061         pa_log("Failed to create thread.");
2062         goto fail;
2063     }
2064
2065     /* Get initial mixer settings */
2066     if (data.volume_is_set) {
2067         if (u->source->set_volume)
2068             u->source->set_volume(u->source);
2069     } else {
2070         if (u->source->get_volume)
2071             u->source->get_volume(u->source);
2072     }
2073
2074     if (data.muted_is_set) {
2075         if (u->source->set_mute)
2076             u->source->set_mute(u->source);
2077     } else {
2078         if (u->source->get_mute)
2079             u->source->get_mute(u->source);
2080     }
2081
2082     if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
2083         u->source->write_volume(u->source);
2084
2085     pa_source_put(u->source);
2086
2087     if (profile_set)
2088         pa_alsa_profile_set_free(profile_set);
2089
2090     return u->source;
2091
2092 fail:
2093
2094     if (u)
2095         userdata_free(u);
2096
2097     if (profile_set)
2098         pa_alsa_profile_set_free(profile_set);
2099
2100     return NULL;
2101 }
2102
2103 static void userdata_free(struct userdata *u) {
2104     pa_assert(u);
2105
2106     if (u->source)
2107         pa_source_unlink(u->source);
2108
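         /* Shut down the IO thread first so nothing is still using the resources freed below */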
2109     if (u->thread) {
2110         pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2111         pa_thread_free(u->thread);
2112     }
2113
2114     pa_thread_mq_done(&u->thread_mq);
2115
2116     if (u->source)
2117         pa_source_unref(u->source);
2118
2119     if (u->mixer_pd)
2120         pa_alsa_mixer_pdata_free(u->mixer_pd);
2121
2122     if (u->alsa_rtpoll_item)
2123         pa_rtpoll_item_free(u->alsa_rtpoll_item);
2124
2125     if (u->rtpoll)
2126         pa_rtpoll_free(u->rtpoll);
2127
2128     if (u->pcm_handle) {
2129         snd_pcm_drop(u->pcm_handle);
2130         snd_pcm_close(u->pcm_handle);
2131     }
2132
2133     if (u->mixer_fdl)
2134         pa_alsa_fdlist_free(u->mixer_fdl);
2135
2136     if (u->mixer_path && !u->mixer_path_set)
2137         pa_alsa_path_free(u->mixer_path);
2138
2139     if (u->mixer_handle)
2140         snd_mixer_close(u->mixer_handle);
2141
2142     if (u->smoother)
2143         pa_smoother_free(u->smoother);
2144
2145     if (u->rates)
2146         pa_xfree(u->rates);
2147
2148     reserve_done(u);
2149     monitor_done(u);
2150
2151     pa_xfree(u->device_name);
2152     pa_xfree(u->control_device);
2153     pa_xfree(u->paths_dir);
2154     pa_xfree(u);
2155 }
2156
2157 void pa_alsa_source_free(pa_source *s) {
2158     struct userdata *u;
2159
2160     pa_source_assert_ref(s);
2161     pa_assert_se(u = s->userdata);
2162
2163     userdata_free(u);
2164 }