alsa: Add a proplist to mappings
platform/upstream/pulseaudio.git: src/modules/alsa/alsa-source.c
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, write to the Free Software
19   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20   USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/volume.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/thread-mq.h>
50 #include <pulsecore/rtpoll.h>
51 #include <pulsecore/time-smoother.h>
52
53 #include <modules/reserve-wrap.h>
54
55 #include "alsa-util.h"
56 #include "alsa-source.h"
57
58 /* #define DEBUG_TIMING */
59
60 #define DEFAULT_DEVICE "default"
61
62 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
63 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
64
65 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
66 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
67 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
68 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
69 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
70 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
71
72 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
73 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
74
75 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
76 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
77
78 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
79 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
80
81 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
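/* VOLUME_ACCURACY (1% of PA_VOLUME_NORM) is the tolerance used further down
 * when deciding whether a residual software volume correction is close enough
 * to unity to be skipped entirely; see the "accurate_enough" checks in the
 * volume callbacks. */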
82
83 struct userdata {
84     pa_core *core;
85     pa_module *module;
86     pa_source *source;
87
88     pa_thread *thread;
89     pa_thread_mq thread_mq;
90     pa_rtpoll *rtpoll;
91
92     snd_pcm_t *pcm_handle;
93
94     char *paths_dir;
95     pa_alsa_fdlist *mixer_fdl;
96     pa_alsa_mixer_pdata *mixer_pd;
97     snd_mixer_t *mixer_handle;
98     pa_alsa_path_set *mixer_path_set;
99     pa_alsa_path *mixer_path;
100
101     pa_cvolume hardware_volume;
102
103     unsigned int *rates;
104
105     size_t
106         frame_size,
107         fragment_size,
108         hwbuf_size,
109         tsched_watermark,
110         tsched_watermark_ref,
111         hwbuf_unused,
112         min_sleep,
113         min_wakeup,
114         watermark_inc_step,
115         watermark_dec_step,
116         watermark_inc_threshold,
117         watermark_dec_threshold;
118
119     pa_usec_t watermark_dec_not_before;
120     pa_usec_t min_latency_ref;
121
122     char *device_name;  /* name of the PCM device */
123     char *control_device; /* name of the control device */
124
125     pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
126
127     pa_bool_t first;
128
129     pa_rtpoll_item *alsa_rtpoll_item;
130
131     pa_smoother *smoother;
132     uint64_t read_count;
133     pa_usec_t smoother_interval;
134     pa_usec_t last_smoother_update;
135
136     pa_reserve_wrapper *reserve;
137     pa_hook_slot *reserve_slot;
138     pa_reserve_monitor_wrapper *monitor;
139     pa_hook_slot *monitor_slot;
140
141     /* ucm context */
142     pa_alsa_ucm_mapping_context *ucm_context;
143 };
144
145 static void userdata_free(struct userdata *u);
146
147 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
148     pa_assert(r);
149     pa_assert(u);
150
151     if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
152         return PA_HOOK_CANCEL;
153
154     return PA_HOOK_OK;
155 }
156
157 static void reserve_done(struct userdata *u) {
158     pa_assert(u);
159
160     if (u->reserve_slot) {
161         pa_hook_slot_free(u->reserve_slot);
162         u->reserve_slot = NULL;
163     }
164
165     if (u->reserve) {
166         pa_reserve_wrapper_unref(u->reserve);
167         u->reserve = NULL;
168     }
169 }
170
171 static void reserve_update(struct userdata *u) {
172     const char *description;
173     pa_assert(u);
174
175     if (!u->source || !u->reserve)
176         return;
177
178     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
179         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
180 }
181
182 static int reserve_init(struct userdata *u, const char *dname) {
183     char *rname;
184
185     pa_assert(u);
186     pa_assert(dname);
187
188     if (u->reserve)
189         return 0;
190
191     if (pa_in_system_mode())
192         return 0;
193
194     if (!(rname = pa_alsa_get_reserve_name(dname)))
195         return 0;
196
197     /* We are resuming, try to lock the device */
198     u->reserve = pa_reserve_wrapper_get(u->core, rname);
199     pa_xfree(rname);
200
201     if (!(u->reserve))
202         return -1;
203
204     reserve_update(u);
205
206     pa_assert(!u->reserve_slot);
207     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
208
209     return 0;
210 }
211
212 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
213     pa_bool_t b;
214
215     pa_assert(w);
216     pa_assert(u);
217
218     b = PA_PTR_TO_UINT(busy) && !u->reserve;
219
220     pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
221     return PA_HOOK_OK;
222 }
223
224 static void monitor_done(struct userdata *u) {
225     pa_assert(u);
226
227     if (u->monitor_slot) {
228         pa_hook_slot_free(u->monitor_slot);
229         u->monitor_slot = NULL;
230     }
231
232     if (u->monitor) {
233         pa_reserve_monitor_wrapper_unref(u->monitor);
234         u->monitor = NULL;
235     }
236 }
237
238 static int reserve_monitor_init(struct userdata *u, const char *dname) {
239     char *rname;
240
241     pa_assert(u);
242     pa_assert(dname);
243
244     if (pa_in_system_mode())
245         return 0;
246
247     if (!(rname = pa_alsa_get_reserve_name(dname)))
248         return 0;
249
250     /* We are resuming, try to lock the device */
251     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
252     pa_xfree(rname);
253
254     if (!(u->monitor))
255         return -1;
256
257     pa_assert(!u->monitor_slot);
258     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
259
260     return 0;
261 }
262
263 static void fix_min_sleep_wakeup(struct userdata *u) {
264     size_t max_use, max_use_2;
265
266     pa_assert(u);
267     pa_assert(u->use_tsched);
268
269     max_use = u->hwbuf_size - u->hwbuf_unused;
270     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
271
272     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
273     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
274
275     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
276     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
277 }
278
279 static void fix_tsched_watermark(struct userdata *u) {
280     size_t max_use;
281     pa_assert(u);
282     pa_assert(u->use_tsched);
283
284     max_use = u->hwbuf_size - u->hwbuf_unused;
285
286     if (u->tsched_watermark > max_use - u->min_sleep)
287         u->tsched_watermark = max_use - u->min_sleep;
288
289     if (u->tsched_watermark < u->min_wakeup)
290         u->tsched_watermark = u->min_wakeup;
291 }
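/* A rough worked example of the clamping above, assuming the defaults in this
 * file and a 44.1 kHz S16 stereo stream with hwbuf_unused == 0: the 2 s tsched
 * buffer gives a max_use of about 352800 bytes, min_sleep (10 ms) and
 * min_wakeup (4 ms) stay well below the max_use/2 cap, and the wakeup
 * watermark is then kept inside [min_wakeup, max_use - min_sleep]. */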
292
293 static void increase_watermark(struct userdata *u) {
294     size_t old_watermark;
295     pa_usec_t old_min_latency, new_min_latency;
296
297     pa_assert(u);
298     pa_assert(u->use_tsched);
299
300     /* First, just try to increase the watermark */
301     old_watermark = u->tsched_watermark;
302     u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
303     fix_tsched_watermark(u);
304
305     if (old_watermark != u->tsched_watermark) {
306         pa_log_info("Increasing wakeup watermark to %0.2f ms",
307                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
308         return;
309     }
310
311     /* Hmm, we cannot increase the watermark any further, hence let's
312      raise the latency unless doing so was disabled in
313      configuration */
314     if (u->fixed_latency_range)
315         return;
316
317     old_min_latency = u->source->thread_info.min_latency;
318     new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
319     new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
320
321     if (old_min_latency != new_min_latency) {
322         pa_log_info("Increasing minimal latency to %0.2f ms",
323                     (double) new_min_latency / PA_USEC_PER_MSEC);
324
325         pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
326     }
327
328     /* When we reach this we're officially fucked! */
329 }
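/* Summary of the strategy above: first try to bump the wakeup watermark
 * (doubling it, but never by more than watermark_inc_step); only if the
 * watermark is already pinned at its maximum is the source's minimum latency
 * widened, and that step is skipped when a fixed latency range was requested. */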
330
331 static void decrease_watermark(struct userdata *u) {
332     size_t old_watermark;
333     pa_usec_t now;
334
335     pa_assert(u);
336     pa_assert(u->use_tsched);
337
338     now = pa_rtclock_now();
339
340     if (u->watermark_dec_not_before <= 0)
341         goto restart;
342
343     if (u->watermark_dec_not_before > now)
344         return;
345
346     old_watermark = u->tsched_watermark;
347
348     if (u->tsched_watermark < u->watermark_dec_step)
349         u->tsched_watermark = u->tsched_watermark / 2;
350     else
351         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
352
353     fix_tsched_watermark(u);
354
355     if (old_watermark != u->tsched_watermark)
356         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
357                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
358
359     /* We don't change the latency range */
360
361 restart:
362     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
363 }
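/* Decreasing is deliberately conservative: the watermark shrinks by at most
 * watermark_dec_step at a time (and never below min_wakeup, enforced by
 * fix_tsched_watermark()), and only after TSCHED_WATERMARK_VERIFY_AFTER_USEC
 * of operation during which the "not before" timestamp was not reset. */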
364
365 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
366     pa_usec_t wm, usec;
367
368     pa_assert(sleep_usec);
369     pa_assert(process_usec);
370
371     pa_assert(u);
372     pa_assert(u->use_tsched);
373
374     usec = pa_source_get_requested_latency_within_thread(u->source);
375
376     if (usec == (pa_usec_t) -1)
377         usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
378
379     wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
380
381     if (wm > usec)
382         wm = usec/2;
383
384     *sleep_usec = usec - wm;
385     *process_usec = wm;
386
387 #ifdef DEBUG_TIMING
388     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
389                  (unsigned long) (usec / PA_USEC_PER_MSEC),
390                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
391                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
392 #endif
393 }
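/* Rough numbers, assuming no client requested a specific latency and the
 * defaults above: usec is the full buffer time (2 s with the default tsched
 * buffer) and wm the 20 ms watermark, so *sleep_usec comes out around 1.98 s
 * and *process_usec at 20 ms, i.e. the thread plans to wake up one watermark
 * before the hardware buffer would overrun. */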
394
395 static int try_recover(struct userdata *u, const char *call, int err) {
396     pa_assert(u);
397     pa_assert(call);
398     pa_assert(err < 0);
399
400     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
401
402     pa_assert(err != -EAGAIN);
403
404     if (err == -EPIPE)
405         pa_log_debug("%s: Buffer overrun!", call);
406
407     if (err == -ESTRPIPE)
408         pa_log_debug("%s: System suspended!", call);
409
410     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
411         pa_log("%s: %s", call, pa_alsa_strerror(err));
412         return -1;
413     }
414
415     u->first = TRUE;
416     return 0;
417 }
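/* try_recover() relies on snd_pcm_recover() to transparently handle -EPIPE
 * (overrun) and -ESTRPIPE (system suspend); on success u->first is set so the
 * IO thread restarts capture with snd_pcm_start() on its next iteration. */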
418
419 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
420     size_t left_to_record;
421     size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
422     pa_bool_t overrun = FALSE;
423
424     /* We use <= instead of < for this check here because an overrun
425      * only happens after the last sample was processed, not as soon as
426      * it is removed from the buffer. This is particularly important
427      * when block transfer is used. */
428
429     if (n_bytes <= rec_space)
430         left_to_record = rec_space - n_bytes;
431     else {
432
433         /* We got a dropout. What a mess! */
434         left_to_record = 0;
435         overrun = TRUE;
436
437 #ifdef DEBUG_TIMING
438         PA_DEBUG_TRAP;
439 #endif
440
441         if (pa_log_ratelimit(PA_LOG_INFO))
442             pa_log_info("Overrun!");
443     }
444
445 #ifdef DEBUG_TIMING
446     pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
447 #endif
448
449     if (u->use_tsched) {
450         pa_bool_t reset_not_before = TRUE;
451
452         if (overrun || left_to_record < u->watermark_inc_threshold)
453             increase_watermark(u);
454         else if (left_to_record > u->watermark_dec_threshold) {
455             reset_not_before = FALSE;
456
457             /* We decrease the watermark only if we have actually
458              * been woken up by a timeout. If something else woke
459              * us up it's too easy to fulfill the deadlines... */
460
461             if (on_timeout)
462                 decrease_watermark(u);
463         }
464
465         if (reset_not_before)
466             u->watermark_dec_not_before = 0;
467     }
468
469     return left_to_record;
470 }
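/* This is the feedback point of the timer scheduling: an overrun or too little
 * remaining space grows the watermark immediately, while a comfortably large
 * margin lets it shrink again, but only when the wakeup really came from the
 * timer, so unrelated wakeups don't make the deadlines look easier than they
 * are. */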
471
472 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
473     pa_bool_t work_done = FALSE;
474     pa_usec_t max_sleep_usec = 0, process_usec = 0;
475     size_t left_to_record;
476     unsigned j = 0;
477
478     pa_assert(u);
479     pa_source_assert_ref(u->source);
480
481     if (u->use_tsched)
482         hw_sleep_time(u, &max_sleep_usec, &process_usec);
483
484     for (;;) {
485         snd_pcm_sframes_t n;
486         size_t n_bytes;
487         int r;
488         pa_bool_t after_avail = TRUE;
489
490         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
491
492             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
493                 continue;
494
495             return r;
496         }
497
498         n_bytes = (size_t) n * u->frame_size;
499
500 #ifdef DEBUG_TIMING
501         pa_log_debug("avail: %lu", (unsigned long) n_bytes);
502 #endif
503
504         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
505         on_timeout = FALSE;
506
507         if (u->use_tsched)
508             if (!polled &&
509                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
510 #ifdef DEBUG_TIMING
511                 pa_log_debug("Not reading, because too early.");
512 #endif
513                 break;
514             }
515
516         if (PA_UNLIKELY(n_bytes <= 0)) {
517
518             if (polled)
519                 PA_ONCE_BEGIN {
520                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
521                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
522                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
523                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
524                            pa_strnull(dn));
525                     pa_xfree(dn);
526                 } PA_ONCE_END;
527
528 #ifdef DEBUG_TIMING
529             pa_log_debug("Not reading, because not necessary.");
530 #endif
531             break;
532         }
533
534
535         if (++j > 10) {
536 #ifdef DEBUG_TIMING
537             pa_log_debug("Not filling up, because already too many iterations.");
538 #endif
539
540             break;
541         }
542
543         polled = FALSE;
544
545 #ifdef DEBUG_TIMING
546         pa_log_debug("Reading");
547 #endif
548
549         for (;;) {
550             pa_memchunk chunk;
551             void *p;
552             int err;
553             const snd_pcm_channel_area_t *areas;
554             snd_pcm_uframes_t offset, frames;
555             snd_pcm_sframes_t sframes;
556
557             frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
558 /*             pa_log_debug("%lu frames to read", (unsigned long) frames); */
559
560             if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
561
562                 if (!after_avail && err == -EAGAIN)
563                     break;
564
565                 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
566                     continue;
567
568                 return r;
569             }
570
571             /* Make sure that if these memblocks need to be copied they will fit into one slot */
572             if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
573                 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
574
575             if (!after_avail && frames == 0)
576                 break;
577
578             pa_assert(frames > 0);
579             after_avail = FALSE;
580
581             /* Check these are multiples of 8 bits */
582             pa_assert((areas[0].first & 7) == 0);
583             pa_assert((areas[0].step & 7) == 0);
584
585             /* We assume a single interleaved memory buffer */
586             pa_assert((areas[0].first >> 3) == 0);
587             pa_assert((areas[0].step >> 3) == u->frame_size);
588
589             p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
590
591             chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
592             chunk.length = pa_memblock_get_length(chunk.memblock);
593             chunk.index = 0;
594
595             pa_source_post(u->source, &chunk);
596             pa_memblock_unref_fixed(chunk.memblock);
597
598             if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
599
600                 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
601                     continue;
602
603                 return r;
604             }
605
606             work_done = TRUE;
607
608             u->read_count += frames * u->frame_size;
609
610 #ifdef DEBUG_TIMING
611             pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
612 #endif
613
614             if ((size_t) frames * u->frame_size >= n_bytes)
615                 break;
616
617             n_bytes -= (size_t) frames * u->frame_size;
618         }
619     }
620
621     if (u->use_tsched) {
622         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
623         process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
624
625         if (*sleep_usec > process_usec)
626             *sleep_usec -= process_usec;
627         else
628             *sleep_usec = 0;
629     }
630
631     return work_done ? 1 : 0;
632 }
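/* mmap_read() posts the captured data zero-copy: the mmap'ed ALSA area is
 * wrapped in a read-only fixed memblock (pa_memblock_new_fixed(..., TRUE)) and
 * handed to pa_source_post() before the frames are committed back with
 * snd_pcm_mmap_commit(). unix_read() below is the fallback for devices without
 * mmap support and instead copies through snd_pcm_readi() into a freshly
 * allocated memblock. */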
633
634 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
635     int work_done = FALSE;
636     pa_usec_t max_sleep_usec = 0, process_usec = 0;
637     size_t left_to_record;
638     unsigned j = 0;
639
640     pa_assert(u);
641     pa_source_assert_ref(u->source);
642
643     if (u->use_tsched)
644         hw_sleep_time(u, &max_sleep_usec, &process_usec);
645
646     for (;;) {
647         snd_pcm_sframes_t n;
648         size_t n_bytes;
649         int r;
650         pa_bool_t after_avail = TRUE;
651
652         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
653
654             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
655                 continue;
656
657             return r;
658         }
659
660         n_bytes = (size_t) n * u->frame_size;
661         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
662         on_timeout = FALSE;
663
664         if (u->use_tsched)
665             if (!polled &&
666                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
667                 break;
668
669         if (PA_UNLIKELY(n_bytes <= 0)) {
670
671             if (polled)
672                 PA_ONCE_BEGIN {
673                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
674                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
675                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
676                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
677                            pa_strnull(dn));
678                     pa_xfree(dn);
679                 } PA_ONCE_END;
680
681             break;
682         }
683
684         if (++j > 10) {
685 #ifdef DEBUG_TIMING
686             pa_log_debug("Not filling up, because already too many iterations.");
687 #endif
688
689             break;
690         }
691
692         polled = FALSE;
693
694         for (;;) {
695             void *p;
696             snd_pcm_sframes_t frames;
697             pa_memchunk chunk;
698
699             chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
700
701             frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
702
703             if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
704                 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
705
706 /*             pa_log_debug("%lu frames to read", (unsigned long) n); */
707
708             p = pa_memblock_acquire(chunk.memblock);
709             frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
710             pa_memblock_release(chunk.memblock);
711
712             if (PA_UNLIKELY(frames < 0)) {
713                 pa_memblock_unref(chunk.memblock);
714
715                 if (!after_avail && (int) frames == -EAGAIN)
716                     break;
717
718                 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
719                     continue;
720
721                 return r;
722             }
723
724             if (!after_avail && frames == 0) {
725                 pa_memblock_unref(chunk.memblock);
726                 break;
727             }
728
729             pa_assert(frames > 0);
730             after_avail = FALSE;
731
732             chunk.index = 0;
733             chunk.length = (size_t) frames * u->frame_size;
734
735             pa_source_post(u->source, &chunk);
736             pa_memblock_unref(chunk.memblock);
737
738             work_done = TRUE;
739
740             u->read_count += frames * u->frame_size;
741
742 /*             pa_log_debug("read %lu frames", (unsigned long) frames); */
743
744             if ((size_t) frames * u->frame_size >= n_bytes)
745                 break;
746
747             n_bytes -= (size_t) frames * u->frame_size;
748         }
749     }
750
751     if (u->use_tsched) {
752         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
753         process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
754
755         if (*sleep_usec > process_usec)
756             *sleep_usec -= process_usec;
757         else
758             *sleep_usec = 0;
759     }
760
761     return work_done ? 1 : 0;
762 }
763
764 static void update_smoother(struct userdata *u) {
765     snd_pcm_sframes_t delay = 0;
766     uint64_t position;
767     int err;
768     pa_usec_t now1 = 0, now2;
769     snd_pcm_status_t *status;
770
771     snd_pcm_status_alloca(&status);
772
773     pa_assert(u);
774     pa_assert(u->pcm_handle);
775
776     /* Let's update the time smoother */
777
778     if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
779         pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
780         return;
781     }
782
783     if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
784         pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
785     else {
786         snd_htimestamp_t htstamp = { 0, 0 };
787         snd_pcm_status_get_htstamp(status, &htstamp);
788         now1 = pa_timespec_load(&htstamp);
789     }
790
791     /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
792     if (now1 <= 0)
793         now1 = pa_rtclock_now();
794
795     /* check if the time since the last update is bigger than the interval */
796     if (u->last_smoother_update > 0)
797         if (u->last_smoother_update + u->smoother_interval > now1)
798             return;
799
800     position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
801     now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
802
803     pa_smoother_put(u->smoother, now1, now2);
804
805     u->last_smoother_update = now1;
806     /* exponentially increase the update interval up to the MAX limit */
807     u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
808 }
809
810 static pa_usec_t source_get_latency(struct userdata *u) {
811     int64_t delay;
812     pa_usec_t now1, now2;
813
814     pa_assert(u);
815
816     now1 = pa_rtclock_now();
817     now2 = pa_smoother_get(u->smoother, now1);
818
819     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
820
821     return delay >= 0 ? (pa_usec_t) delay : 0;
822 }
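/* The latency estimate works off the smoother: now2 is the smoothed amount of
 * data the hardware has captured so far (expressed in time), read_count is
 * what has already been posted downstream, so their difference is the data
 * still sitting in the ALSA buffer, clamped to zero if the estimate runs
 * slightly behind. */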
823
824 static int build_pollfd(struct userdata *u) {
825     pa_assert(u);
826     pa_assert(u->pcm_handle);
827
828     if (u->alsa_rtpoll_item)
829         pa_rtpoll_item_free(u->alsa_rtpoll_item);
830
831     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
832         return -1;
833
834     return 0;
835 }
836
837 /* Called from IO context */
838 static int suspend(struct userdata *u) {
839     pa_assert(u);
840     pa_assert(u->pcm_handle);
841
842     pa_smoother_pause(u->smoother, pa_rtclock_now());
843
844     /* Let's suspend */
845     snd_pcm_close(u->pcm_handle);
846     u->pcm_handle = NULL;
847
848     if (u->alsa_rtpoll_item) {
849         pa_rtpoll_item_free(u->alsa_rtpoll_item);
850         u->alsa_rtpoll_item = NULL;
851     }
852
853     pa_log_info("Device suspended...");
854
855     return 0;
856 }
857
858 /* Called from IO context */
859 static int update_sw_params(struct userdata *u) {
860     snd_pcm_uframes_t avail_min;
861     int err;
862
863     pa_assert(u);
864
865     /* Use the full buffer if no one asked us for anything specific */
866     u->hwbuf_unused = 0;
867
868     if (u->use_tsched) {
869         pa_usec_t latency;
870
871         if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
872             size_t b;
873
874             pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
875
876             b = pa_usec_to_bytes(latency, &u->source->sample_spec);
877
878             /* We need at least one frame in our buffer */
879
880             if (PA_UNLIKELY(b < u->frame_size))
881                 b = u->frame_size;
882
883             u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
884         }
885
886         fix_min_sleep_wakeup(u);
887         fix_tsched_watermark(u);
888     }
889
890     pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
891
892     avail_min = 1;
893
894     if (u->use_tsched) {
895         pa_usec_t sleep_usec, process_usec;
896
897         hw_sleep_time(u, &sleep_usec, &process_usec);
898         avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
899     }
900
901     pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
902
903     if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
904         pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
905         return err;
906     }
907
908     return 0;
909 }
910
911 /* Called from IO Context on unsuspend or from main thread when creating source */
912 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
913                             pa_bool_t in_thread)
914 {
915     u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
916                                                     &u->source->sample_spec);
917
918     u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
919     u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
920
921     u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
922     u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
923
924     fix_min_sleep_wakeup(u);
925     fix_tsched_watermark(u);
926
927     if (in_thread)
928         pa_source_set_latency_range_within_thread(u->source,
929                                                   u->min_latency_ref,
930                                                   pa_bytes_to_usec(u->hwbuf_size, ss));
931     else {
932         pa_source_set_latency_range(u->source,
933                                     0,
934                                     pa_bytes_to_usec(u->hwbuf_size, ss));
935
936         /* Work around the assert in pa_source_set_latency_range_within_thread():
937            keep track of min_latency and reuse it when
938            this routine is called from the IO context */
939         u->min_latency_ref = u->source->thread_info.min_latency;
940     }
941
942     pa_log_info("Time scheduling watermark is %0.2fms",
943                 (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
944 }
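/* reset_watermark() recomputes the watermark and its step/threshold values in
 * bytes for the current sample spec. The main-thread path sets the latency
 * range (0 .. buffer time) and caches the resulting thread_info.min_latency in
 * min_latency_ref, which the IO-context path on unsuspend reuses to avoid the
 * assert mentioned above. */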
945
946 /* Called from IO context */
947 static int unsuspend(struct userdata *u) {
948     pa_sample_spec ss;
949     int err;
950     pa_bool_t b, d;
951     snd_pcm_uframes_t period_size, buffer_size;
952
953     pa_assert(u);
954     pa_assert(!u->pcm_handle);
955
956     pa_log_info("Trying resume...");
957
958     if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
959                             SND_PCM_NONBLOCK|
960                             SND_PCM_NO_AUTO_RESAMPLE|
961                             SND_PCM_NO_AUTO_CHANNELS|
962                             SND_PCM_NO_AUTO_FORMAT)) < 0) {
963         pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
964         goto fail;
965     }
966
967     ss = u->source->sample_spec;
968     period_size = u->fragment_size / u->frame_size;
969     buffer_size = u->hwbuf_size / u->frame_size;
970     b = u->use_mmap;
971     d = u->use_tsched;
972
973     if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
974         pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
975         goto fail;
976     }
977
978     if (b != u->use_mmap || d != u->use_tsched) {
979         pa_log_warn("Resume failed, couldn't get original access mode.");
980         goto fail;
981     }
982
983     if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
984         pa_log_warn("Resume failed, couldn't restore original sample settings.");
985         goto fail;
986     }
987
988     if (period_size*u->frame_size != u->fragment_size ||
989         buffer_size*u->frame_size != u->hwbuf_size) {
990         pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
991                     (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
992                     (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
993         goto fail;
994     }
995
996     if (update_sw_params(u) < 0)
997         goto fail;
998
999     if (build_pollfd(u) < 0)
1000         goto fail;
1001
1002     /* FIXME: We need to reload the volume somehow */
1003
1004     u->read_count = 0;
1005     pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1006     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1007     u->last_smoother_update = 0;
1008
1009     u->first = TRUE;
1010
1011     /* reset the watermark to the value defined when the source was created */
1012     if (u->use_tsched)
1013         reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, TRUE);
1014
1015     pa_log_info("Resumed successfully...");
1016
1017     return 0;
1018
1019 fail:
1020     if (u->pcm_handle) {
1021         snd_pcm_close(u->pcm_handle);
1022         u->pcm_handle = NULL;
1023     }
1024
1025     return -PA_ERR_IO;
1026 }
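/* unsuspend() insists on getting back exactly the access mode, sample spec and
 * fragment geometry it had before the suspend; if the device cannot reproduce
 * them the resume is treated as failed, the PCM is closed again and -PA_ERR_IO
 * is returned. */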
1027
1028 /* Called from IO context */
1029 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1030     struct userdata *u = PA_SOURCE(o)->userdata;
1031
1032     switch (code) {
1033
1034         case PA_SOURCE_MESSAGE_GET_LATENCY: {
1035             pa_usec_t r = 0;
1036
1037             if (u->pcm_handle)
1038                 r = source_get_latency(u);
1039
1040             *((pa_usec_t*) data) = r;
1041
1042             return 0;
1043         }
1044
1045         case PA_SOURCE_MESSAGE_SET_STATE:
1046
1047             switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
1048
1049                 case PA_SOURCE_SUSPENDED: {
1050                     int r;
1051
1052                     pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1053
1054                     if ((r = suspend(u)) < 0)
1055                         return r;
1056
1057                     break;
1058                 }
1059
1060                 case PA_SOURCE_IDLE:
1061                 case PA_SOURCE_RUNNING: {
1062                     int r;
1063
1064                     if (u->source->thread_info.state == PA_SOURCE_INIT) {
1065                         if (build_pollfd(u) < 0)
1066                             return -PA_ERR_IO;
1067                     }
1068
1069                     if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1070                         if ((r = unsuspend(u)) < 0)
1071                             return r;
1072                     }
1073
1074                     break;
1075                 }
1076
1077                 case PA_SOURCE_UNLINKED:
1078                 case PA_SOURCE_INIT:
1079                 case PA_SOURCE_INVALID_STATE:
1080                     ;
1081             }
1082
1083             break;
1084     }
1085
1086     return pa_source_process_msg(o, code, data, offset, chunk);
1087 }
1088
1089 /* Called from main context */
1090 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1091     pa_source_state_t old_state;
1092     struct userdata *u;
1093
1094     pa_source_assert_ref(s);
1095     pa_assert_se(u = s->userdata);
1096
1097     old_state = pa_source_get_state(u->source);
1098
1099     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1100         reserve_done(u);
1101     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1102         if (reserve_init(u, u->device_name) < 0)
1103             return -PA_ERR_BUSY;
1104
1105     return 0;
1106 }
1107
1108 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1109     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1110
1111     pa_assert(u);
1112     pa_assert(u->mixer_handle);
1113
1114     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1115         return 0;
1116
1117     if (!PA_SOURCE_IS_LINKED(u->source->state))
1118         return 0;
1119
1120     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1121         pa_source_set_mixer_dirty(u->source, TRUE);
1122         return 0;
1123     }
1124
1125     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1126         pa_source_get_volume(u->source, TRUE);
1127         pa_source_get_mute(u->source, TRUE);
1128     }
1129
1130     return 0;
1131 }
1132
1133 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1134     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1135
1136     pa_assert(u);
1137     pa_assert(u->mixer_handle);
1138
1139     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1140         return 0;
1141
1142     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1143         pa_source_set_mixer_dirty(u->source, TRUE);
1144         return 0;
1145     }
1146
1147     if (mask & SND_CTL_EVENT_MASK_VALUE)
1148         pa_source_update_volume_and_mute(u->source);
1149
1150     return 0;
1151 }
1152
1153 static void source_get_volume_cb(pa_source *s) {
1154     struct userdata *u = s->userdata;
1155     pa_cvolume r;
1156     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1157
1158     pa_assert(u);
1159     pa_assert(u->mixer_path);
1160     pa_assert(u->mixer_handle);
1161
1162     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1163         return;
1164
1165     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1166     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1167
1168     pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1169
1170     if (u->mixer_path->has_dB) {
1171         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1172
1173         pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1174     }
1175
1176     if (pa_cvolume_equal(&u->hardware_volume, &r))
1177         return;
1178
1179     s->real_volume = u->hardware_volume = r;
1180
1181     /* Hmm, so the hardware volume changed, let's reset our software volume */
1182     if (u->mixer_path->has_dB)
1183         pa_source_set_soft_volume(s, NULL);
1184 }
1185
1186 static void source_set_volume_cb(pa_source *s) {
1187     struct userdata *u = s->userdata;
1188     pa_cvolume r;
1189     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1190     pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1191
1192     pa_assert(u);
1193     pa_assert(u->mixer_path);
1194     pa_assert(u->mixer_handle);
1195
1196     /* Shift up by the base volume */
1197     pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1198
1199     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1200         return;
1201
1202     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1203     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1204
1205     u->hardware_volume = r;
1206
1207     if (u->mixer_path->has_dB) {
1208         pa_cvolume new_soft_volume;
1209         pa_bool_t accurate_enough;
1210         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1211
1212         /* Match exactly what the user requested by software */
1213         pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1214
1215         /* If the adjustment to do in software is only minimal we
1216          * can skip it. That saves us CPU at the expense of a bit of
1217          * accuracy */
1218         accurate_enough =
1219             (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1220             (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1221
1222         pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1223         pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1224         pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1225         pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1226         pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1227                      pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1228                      pa_yes_no(accurate_enough));
1229         pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1230
1231         if (!accurate_enough)
1232             s->soft_volume = new_soft_volume;
1233
1234     } else {
1235         pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1236
1237         /* We can't match exactly what the user requested, hence let's
1238          * at least tell the user about it */
1239
1240         s->real_volume = r;
1241     }
1242 }
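/* In the dB branch above the requested volume is split into a hardware part
 * and a residual software part (real_volume divided by hardware_volume); if
 * that residual is within VOLUME_ACCURACY of unity the software step is
 * skipped to save CPU. Without dB information the volume actually set in
 * hardware is simply reported back as the new real_volume. */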
1243
1244 static void source_write_volume_cb(pa_source *s) {
1245     struct userdata *u = s->userdata;
1246     pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1247
1248     pa_assert(u);
1249     pa_assert(u->mixer_path);
1250     pa_assert(u->mixer_handle);
1251     pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1252
1253     /* Shift up by the base volume */
1254     pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1255
1256     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1257         pa_log_error("Writing HW volume failed");
1258     else {
1259         pa_cvolume tmp_vol;
1260         pa_bool_t accurate_enough;
1261
1262         /* Shift down by the base volume, so that 0dB becomes maximum volume */
1263         pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1264
1265         pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1266         accurate_enough =
1267             (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1268             (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1269
1270         if (!accurate_enough) {
1271             union {
1272                 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1273                 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1274             } vol;
1275
1276             pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1277                          pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1278                          pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1279             pa_log_debug("                                           in dB: %s (request) != %s",
1280                          pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1281                          pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1282         }
1283     }
1284 }
1285
1286 static void source_get_mute_cb(pa_source *s) {
1287     struct userdata *u = s->userdata;
1288     pa_bool_t b;
1289
1290     pa_assert(u);
1291     pa_assert(u->mixer_path);
1292     pa_assert(u->mixer_handle);
1293
1294     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1295         return;
1296
1297     s->muted = b;
1298 }
1299
1300 static void source_set_mute_cb(pa_source *s) {
1301     struct userdata *u = s->userdata;
1302
1303     pa_assert(u);
1304     pa_assert(u->mixer_path);
1305     pa_assert(u->mixer_handle);
1306
1307     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1308 }
1309
1310 static void mixer_volume_init(struct userdata *u) {
1311     pa_assert(u);
1312
1313     if (!u->mixer_path->has_volume) {
1314         pa_source_set_write_volume_callback(u->source, NULL);
1315         pa_source_set_get_volume_callback(u->source, NULL);
1316         pa_source_set_set_volume_callback(u->source, NULL);
1317
1318         pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1319     } else {
1320         pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1321         pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1322
1323         if (u->mixer_path->has_dB && u->deferred_volume) {
1324             pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1325             pa_log_info("Successfully enabled deferred volume.");
1326         } else
1327             pa_source_set_write_volume_callback(u->source, NULL);
1328
1329         if (u->mixer_path->has_dB) {
1330             pa_source_enable_decibel_volume(u->source, TRUE);
1331             pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1332
1333             u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1334             u->source->n_volume_steps = PA_VOLUME_NORM+1;
1335
1336             pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1337         } else {
1338             pa_source_enable_decibel_volume(u->source, FALSE);
1339             pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1340
1341             u->source->base_volume = PA_VOLUME_NORM;
1342             u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1343         }
1344
1345         pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1346     }
1347
1348     if (!u->mixer_path->has_mute) {
1349         pa_source_set_get_mute_callback(u->source, NULL);
1350         pa_source_set_set_mute_callback(u->source, NULL);
1351         pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1352     } else {
1353         pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1354         pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1355         pa_log_info("Using hardware mute control.");
1356     }
1357 }
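/* mixer_volume_init() wires the volume/mute callbacks according to what the
 * probed mixer path offers: dB-capable paths get decibel volume (optionally
 * with the deferred write callback), paths without dB data fall back to raw
 * volume steps, and missing volume or mute controls are emulated in software. */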
1358
1359 static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
1360     struct userdata *u = s->userdata;
1361
1362     pa_assert(u);
1363     pa_assert(p);
1364     pa_assert(u->ucm_context);
1365
1366     return pa_alsa_ucm_set_port(u->ucm_context, p, FALSE);
1367 }
1368
1369 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1370     struct userdata *u = s->userdata;
1371     pa_alsa_port_data *data;
1372
1373     pa_assert(u);
1374     pa_assert(p);
1375     pa_assert(u->mixer_handle);
1376
1377     data = PA_DEVICE_PORT_DATA(p);
1378
1379     pa_assert_se(u->mixer_path = data->path);
1380     pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1381
1382     mixer_volume_init(u);
1383
1384     if (s->set_mute)
1385         s->set_mute(s);
1386     if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1387         if (s->write_volume)
1388             s->write_volume(s);
1389     } else {
1390         if (s->set_volume)
1391             s->set_volume(s);
1392     }
1393
1394     return 0;
1395 }
1396
1397 static void source_update_requested_latency_cb(pa_source *s) {
1398     struct userdata *u = s->userdata;
1399     pa_assert(u);
1400     pa_assert(u->use_tsched); /* only when timer scheduling is used
1401                                * can we dynamically adjust the
1402                                * latency */
1403
1404     if (!u->pcm_handle)
1405         return;
1406
1407     update_sw_params(u);
1408 }
1409
1410 static pa_bool_t source_update_rate_cb(pa_source *s, uint32_t rate)
1411 {
1412     struct userdata *u = s->userdata;
1413     int i;
1414     pa_bool_t supported = FALSE;
1415
1416     pa_assert(u);
1417
1418     for (i = 0; u->rates[i]; i++) {
1419         if (u->rates[i] == rate) {
1420             supported = TRUE;
1421             break;
1422         }
1423     }
1424
1425     if (!supported) {
1426         pa_log_info("Source does not support sample rate of %d Hz", rate);
1427         return FALSE;
1428     }
1429
1430     if (!PA_SOURCE_IS_OPENED(s->state)) {
1431         pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1432         u->source->sample_spec.rate = rate;
1433         return TRUE;
1434     }
1435
1436     return FALSE;
1437 }
1438
1439 static void thread_func(void *userdata) {
1440     struct userdata *u = userdata;
1441     unsigned short revents = 0;
1442
1443     pa_assert(u);
1444
1445     pa_log_debug("Thread starting up");
1446
1447     if (u->core->realtime_scheduling)
1448         pa_make_realtime(u->core->realtime_priority);
1449
1450     pa_thread_mq_install(&u->thread_mq);
1451
1452     for (;;) {
1453         int ret;
1454         pa_usec_t rtpoll_sleep = 0;
1455
1456 #ifdef DEBUG_TIMING
1457         pa_log_debug("Loop");
1458 #endif
1459
1460         /* Read some data and pass it to the sources */
1461         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1462             int work_done;
1463             pa_usec_t sleep_usec = 0;
1464             pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1465
1466             if (u->first) {
1467                 pa_log_info("Starting capture.");
1468                 snd_pcm_start(u->pcm_handle);
1469
1470                 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1471
1472                 u->first = FALSE;
1473             }
1474
1475             if (u->use_mmap)
1476                 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1477             else
1478                 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1479
1480             if (work_done < 0)
1481                 goto fail;
1482
1483 /*             pa_log_debug("work_done = %i", work_done); */
1484
1485             if (work_done)
1486                 update_smoother(u);
1487
1488             if (u->use_tsched) {
1489                 pa_usec_t cusec;
1490
1491                 /* OK, the capture buffer is now empty, let's
1492                  * calculate when to wake up next */
1493
1494 /*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1495
1496                 /* Convert from the sound card time domain to the
1497                  * system time domain */
1498                 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1499
1500 /*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1501
1502                 /* We don't trust the conversion, so we wake up at whichever time comes first */
1503                 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1504             }
1505         }
1506
1507         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1508             pa_usec_t volume_sleep;
1509             pa_source_volume_change_apply(u->source, &volume_sleep);
1510             if (volume_sleep > 0) {
1511                 if (rtpoll_sleep > 0)
1512                     rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1513                 else
1514                     rtpoll_sleep = volume_sleep;
1515             }
1516         }
1517
1518         if (rtpoll_sleep > 0)
1519             pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1520         else
1521             pa_rtpoll_set_timer_disabled(u->rtpoll);
1522
1523         /* Hmm, nothing to do. Let's sleep */
1524         if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1525             goto fail;
1526
1527         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
1528             pa_source_volume_change_apply(u->source, NULL);
1529
1530         if (ret == 0)
1531             goto finish;
1532
1533         /* Tell ALSA about this and process its response */
1534         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1535             struct pollfd *pollfd;
1536             int err;
1537             unsigned n;
1538
1539             pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1540
1541             if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1542                 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1543                 goto fail;
1544             }
1545
1546             if (revents & ~POLLIN) {
1547                 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1548                     goto fail;
1549
1550                 u->first = TRUE;
1551                 revents = 0;
1552             } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1553                 pa_log_debug("Wakeup from ALSA!");
1554
1555         } else
1556             revents = 0;
1557     }
1558
1559 fail:
1560     /* If this was not a regular exit from the loop we have to continue
1561      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1562     pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1563     pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1564
1565 finish:
1566     pa_log_debug("Thread shutting down");
1567 }
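/* The IO thread alternates between draining the capture buffer and sleeping on
 * the rtpoll: with timer scheduling the next wakeup is the minimum of the
 * card-clock estimate and its smoother-translated system-clock equivalent
 * (plus any pending deferred-volume deadline), and error events on the PCM
 * pollfds are handled via pa_alsa_recover_from_poll() before restarting
 * capture. */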
1568
1569 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1570     const char *n;
1571     char *t;
1572
1573     pa_assert(data);
1574     pa_assert(ma);
1575     pa_assert(device_name);
1576
1577     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1578         pa_source_new_data_set_name(data, n);
1579         data->namereg_fail = TRUE;
1580         return;
1581     }
1582
1583     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1584         data->namereg_fail = TRUE;
1585     else {
1586         n = device_id ? device_id : device_name;
1587         data->namereg_fail = FALSE;
1588     }
1589
1590     if (mapping)
1591         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1592     else
1593         t = pa_sprintf_malloc("alsa_input.%s", n);
1594
1595     pa_source_new_data_set_name(data, t);
1596     pa_xfree(t);
1597 }
1598
1599 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1600     snd_hctl_t *hctl;
1601
1602     if (!mapping && !element)
1603         return;
1604
1605     if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1606         pa_log_info("Failed to find a working mixer device.");
1607         return;
1608     }
1609
1610     if (element) {
1611
1612         if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1613             goto fail;
1614
1615         if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1616             goto fail;
1617
1618         pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1619         pa_alsa_path_dump(u->mixer_path);
1620     } else if (!(u->mixer_path_set = mapping->input_path_set))
1621         goto fail;
1622
1623     return;
1624
1625 fail:
1626
1627     if (u->mixer_path) {
1628         pa_alsa_path_free(u->mixer_path);
1629         u->mixer_path = NULL;
1630     }
1631
1632     if (u->mixer_handle) {
1633         snd_mixer_close(u->mixer_handle);
1634         u->mixer_handle = NULL;
1635     }
1636 }
1637
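/* Activate the mixer path that belongs to the currently active port (or the
 * single path we discovered), initialize volume/mute handling, and register
 * for mixer change notifications: on the IO thread's rtpoll when deferred
 * volume is in use, on the main loop otherwise. */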
1638 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1639     pa_bool_t need_mixer_callback = FALSE;
1640
1641     pa_assert(u);
1642
1643     if (!u->mixer_handle)
1644         return 0;
1645
1646     if (u->source->active_port) {
1647         pa_alsa_port_data *data;
1648
1649         /* We have a list of supported paths, so let's activate the
1650          * one that has been chosen as active */
1651
1652         data = PA_DEVICE_PORT_DATA(u->source->active_port);
1653         u->mixer_path = data->path;
1654
1655         pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);
1656
1657     } else {
1658
1659         if (!u->mixer_path && u->mixer_path_set)
1660             u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1661
1662         if (u->mixer_path) {
1663             /* We ended up with only a single path, so let's activate it */
1664
1665             pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
1666         } else
1667             return 0;
1668     }
1669
1670     mixer_volume_init(u);
1671
1672     /* Will we need to register callbacks? */
1673     if (u->mixer_path_set && u->mixer_path_set->paths) {
1674         pa_alsa_path *p;
1675         void *state;
1676
1677         PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1678             if (p->has_volume || p->has_mute)
1679                 need_mixer_callback = TRUE;
1680         }
1681     }
1682     else if (u->mixer_path)
1683         need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1684
1685     if (need_mixer_callback) {
1686         int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
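        /* Both callbacks chosen below match the snd_mixer_elem_callback_t
         * shape. A minimal sketch of such a handler (illustrative only; the
         * real io_mixer_callback/ctl_mixer_callback are defined earlier in
         * this file) could look like:
         *
         *   static int example_cb(snd_mixer_elem_t *elem, unsigned int mask) {
         *       if (mask == SND_CTL_EVENT_MASK_REMOVE)
         *           return 0;
         *       if (mask & SND_CTL_EVENT_MASK_VALUE) {
         *           // hardware volume/mute changed: re-read and update the source
         *       }
         *       return 0;
         *   }
         */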
1687         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1688             u->mixer_pd = pa_alsa_mixer_pdata_new();
1689             mixer_callback = io_mixer_callback;
1690
1691             if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1692                 pa_log("Failed to initialize file descriptor monitoring");
1693                 return -1;
1694             }
1695         } else {
1696             u->mixer_fdl = pa_alsa_fdlist_new();
1697             mixer_callback = ctl_mixer_callback;
1698
1699             if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1700                 pa_log("Failed to initialize file descriptor monitoring");
1701                 return -1;
1702             }
1703         }
1704
1705         if (u->mixer_path_set)
1706             pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1707         else
1708             pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1709     }
1710
1711     return 0;
1712 }
1713
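/* Create a new ALSA capture source. This parses the module arguments, opens
 * the PCM (via an explicit mapping, via device_id= with automatic profile
 * probing, or via a plain device string), wires up mixer or UCM handling and
 * finally spawns the IO thread. A typical invocation reaching this code,
 * with purely illustrative values, would be something like:
 *
 *   load-module module-alsa-source device_id=1 tsched=yes fragments=4 fragment_size=4800
 */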
1714 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1715
1716     struct userdata *u = NULL;
1717     const char *dev_id = NULL, *key;
1718     pa_sample_spec ss;
1719     uint32_t alternate_sample_rate;
1720     pa_channel_map map;
1721     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1722     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1723     size_t frame_size;
1724     pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, fixed_latency_range = FALSE;
1725     pa_source_new_data data;
1726     pa_alsa_profile_set *profile_set = NULL;
1727     void *state = NULL;
1728
1729     pa_assert(m);
1730     pa_assert(ma);
1731
1732     ss = m->core->default_sample_spec;
1733     map = m->core->default_channel_map;
1734     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1735         pa_log("Failed to parse sample specification and channel map");
1736         goto fail;
1737     }
1738
1739     alternate_sample_rate = m->core->alternate_sample_rate;
1740     if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1741         pa_log("Failed to parse alternate sample rate");
1742         goto fail;
1743     }
1744
1745     frame_size = pa_frame_size(&ss);
1746
1747     nfrags = m->core->default_n_fragments;
1748     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1749     if (frag_size <= 0)
1750         frag_size = (uint32_t) frame_size;
1751     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1752     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1753
1754     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1755         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1756         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1757         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1758         pa_log("Failed to parse buffer metrics");
1759         goto fail;
1760     }
1761
1762     buffer_size = nfrags * frag_size;
1763
1764     period_frames = frag_size/frame_size;
1765     buffer_frames = buffer_size/frame_size;
1766     tsched_frames = tsched_size/frame_size;
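    /* A quick worked example (assuming the common daemon defaults of 4
     * fragments of 25 ms each): at 48 kHz S16 stereo a frame is 4 bytes and
     * one second is 192000 bytes, so frag_size is 4800 bytes, period_frames
     * is 1200, buffer_size is 19200 bytes and buffer_frames is 4800, i.e.
     * roughly 100 ms of audio. */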
1767
1768     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1769         pa_log("Failed to parse mmap argument.");
1770         goto fail;
1771     }
1772
1773     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1774         pa_log("Failed to parse tsched argument.");
1775         goto fail;
1776     }
1777
1778     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1779         pa_log("Failed to parse ignore_dB argument.");
1780         goto fail;
1781     }
1782
1783     deferred_volume = m->core->deferred_volume;
1784     if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1785         pa_log("Failed to parse deferred_volume argument.");
1786         goto fail;
1787     }
1788
1789     if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
1790         pa_log("Failed to parse fixed_latency_range argument.");
1791         goto fail;
1792     }
1793
1794     use_tsched = pa_alsa_may_tsched(use_tsched);
1795
1796     u = pa_xnew0(struct userdata, 1);
1797     u->core = m->core;
1798     u->module = m;
1799     u->use_mmap = use_mmap;
1800     u->use_tsched = use_tsched;
1801     u->deferred_volume = deferred_volume;
1802     u->fixed_latency_range = fixed_latency_range;
1803     u->first = TRUE;
1804     u->rtpoll = pa_rtpoll_new();
1805     pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1806
1807     u->smoother = pa_smoother_new(
1808             SMOOTHER_ADJUST_USEC,
1809             SMOOTHER_WINDOW_USEC,
1810             TRUE,
1811             TRUE,
1812             5,
1813             pa_rtclock_now(),
1814             TRUE);
1815     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
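    /* The smoother turns the jittery hardware read pointer into a smoothed
     * time estimate used for latency reporting; smoother_interval starts at
     * SMOOTHER_MIN_INTERVAL and bounds how often new samples are fed in. */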
1816
1817     /* If this mapping is managed by ALSA UCM, remember its context */
1818     if (mapping && mapping->ucm_context.ucm)
1819         u->ucm_context = &mapping->ucm_context;
1820
1821     dev_id = pa_modargs_get_value(
1822             ma, "device_id",
1823             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1824
1825     u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
1826
1827     if (reserve_init(u, dev_id) < 0)
1828         goto fail;
1829
1830     if (reserve_monitor_init(u, dev_id) < 0)
1831         goto fail;
1832
1833     b = use_mmap;
1834     d = use_tsched;
1835
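    /* Three ways to open the PCM follow: (1) an explicit mapping together
     * with device_id=, (2) device_id= alone, letting the profile probing
     * pick a suitable mapping for us, (3) a plain "device" string,
     * defaulting to DEFAULT_DEVICE. */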
1836     if (mapping) {
1837
1838         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1839             pa_log("device_id= not set");
1840             goto fail;
1841         }
1842
1843         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1844                       dev_id,
1845                       &u->device_name,
1846                       &ss, &map,
1847                       SND_PCM_STREAM_CAPTURE,
1848                       &period_frames, &buffer_frames, tsched_frames,
1849                       &b, &d, mapping)))
1850             goto fail;
1851
1852     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1853
1854         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1855             goto fail;
1856
1857         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1858                       dev_id,
1859                       &u->device_name,
1860                       &ss, &map,
1861                       SND_PCM_STREAM_CAPTURE,
1862                       &period_frames, &buffer_frames, tsched_frames,
1863                       &b, &d, profile_set, &mapping)))
1864             goto fail;
1865
1866     } else {
1867
1868         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1869                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1870                       &u->device_name,
1871                       &ss, &map,
1872                       SND_PCM_STREAM_CAPTURE,
1873                       &period_frames, &buffer_frames, tsched_frames,
1874                       &b, &d, FALSE)))
1875             goto fail;
1876     }
1877
1878     pa_assert(u->device_name);
1879     pa_log_info("Successfully opened device %s.", u->device_name);
1880
1881     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1882         pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
1883         goto fail;
1884     }
1885
1886     if (mapping)
1887         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1888
1889     if (use_mmap && !b) {
1890         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1891         u->use_mmap = use_mmap = FALSE;
1892     }
1893
1894     if (use_tsched && (!b || !d)) {
1895         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1896         u->use_tsched = use_tsched = FALSE;
1897     }
1898
1899     if (u->use_mmap)
1900         pa_log_info("Successfully enabled mmap() mode.");
1901
1902     if (u->use_tsched) {
1903         pa_log_info("Successfully enabled timer-based scheduling mode.");
1904         if (u->fixed_latency_range)
1905             pa_log_info("Disabling latency range changes on overrun");
1906     }
1907
1908     u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
1909     if (!u->rates) {
1910         pa_log_error("Failed to find any supported sample rates.");
1911         goto fail;
1912     }
1913
1914     /* ALSA might tweak the sample spec, so recalculate the frame size */
1915     frame_size = pa_frame_size(&ss);
1916
1917     if (!u->ucm_context)
1918         find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1919
1920     pa_source_new_data_init(&data);
1921     data.driver = driver;
1922     data.module = m;
1923     data.card = card;
1924     set_source_name(&data, ma, dev_id, u->device_name, mapping);
1925
1926     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1927      * variable instead of using &data.namereg_fail directly, because
1928      * data.namereg_fail is a bitfield and taking the address of a bitfield
1929      * variable is impossible. */
1930     namereg_fail = data.namereg_fail;
1931     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1932         pa_log("Failed to parse namereg_fail argument.");
1933         pa_source_new_data_done(&data);
1934         goto fail;
1935     }
1936     data.namereg_fail = namereg_fail;
1937
1938     pa_source_new_data_set_sample_spec(&data, &ss);
1939     pa_source_new_data_set_channel_map(&data, &map);
1940     pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
1941
1942     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1943     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1944     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1945     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1946     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1947
1948     if (mapping) {
1949         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1950         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1951
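        /* Copy any properties attached to the mapping itself (filled in by
         * the profile-set configuration) into the source's property list. */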
1952         while ((key = pa_proplist_iterate(mapping->proplist, &state)))
1953             pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
1954     }
1955
1956     pa_alsa_init_description(data.proplist);
1957
1958     if (u->control_device)
1959         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1960
1961     if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1962         pa_log("Invalid properties");
1963         pa_source_new_data_done(&data);
1964         goto fail;
1965     }
1966
1967     if (u->ucm_context)
1968         pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, FALSE, card);
1969     else if (u->mixer_path_set)
1970         pa_alsa_add_ports(&data, u->mixer_path_set, card);
1971
1972     u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1973     pa_source_new_data_done(&data);
1974
1975     if (!u->source) {
1976         pa_log("Failed to create source object");
1977         goto fail;
1978     }
1979
1980     if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
1981                                  &u->source->thread_info.volume_change_safety_margin) < 0) {
1982         pa_log("Failed to parse deferred_volume_safety_margin parameter");
1983         goto fail;
1984     }
1985
1986     if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
1987                                  &u->source->thread_info.volume_change_extra_delay) < 0) {
1988         pa_log("Failed to parse deferred_volume_extra_delay parameter");
1989         goto fail;
1990     }
1991
1992     u->source->parent.process_msg = source_process_msg;
1993     if (u->use_tsched)
1994         u->source->update_requested_latency = source_update_requested_latency_cb;
1995     u->source->set_state = source_set_state_cb;
1996     if (u->ucm_context)
1997         u->source->set_port = source_set_port_ucm_cb;
1998     else
1999         u->source->set_port = source_set_port_cb;
2000     if (u->source->alternate_sample_rate)
2001         u->source->update_rate = source_update_rate_cb;
2002     u->source->userdata = u;
2003
2004     pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
2005     pa_source_set_rtpoll(u->source, u->rtpoll);
2006
2007     u->frame_size = frame_size;
2008     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2009     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2010     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
2011
2012     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2013                 (double) u->hwbuf_size / (double) u->fragment_size,
2014                 (long unsigned) u->fragment_size,
2015                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2016                 (long unsigned) u->hwbuf_size,
2017                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2018
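    /* With timer-based scheduling the watermark (and hence the achievable
     * latency range) can be adjusted dynamically at runtime; without it the
     * source's latency is simply fixed to the hardware buffer size. */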
2019     if (u->use_tsched) {
2020         u->tsched_watermark_ref = tsched_watermark;
2021         reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2022     }
2023     else
2024         pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
2025
2026     reserve_update(u);
2027
2028     if (update_sw_params(u) < 0)
2029         goto fail;
2030
2031     if (u->ucm_context) {
2032         if (u->source->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, FALSE) < 0)
2033             goto fail;
2034     } else if (setup_mixer(u, ignore_dB) < 0)
2035         goto fail;
2036
2037     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2038
2039     if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
2040         pa_log("Failed to create thread.");
2041         goto fail;
2042     }
2043
2044     /* Get initial mixer settings: push user-requested volume/mute to the hardware, otherwise read back what the hardware currently reports */
2045     if (data.volume_is_set) {
2046         if (u->source->set_volume)
2047             u->source->set_volume(u->source);
2048     } else {
2049         if (u->source->get_volume)
2050             u->source->get_volume(u->source);
2051     }
2052
2053     if (data.muted_is_set) {
2054         if (u->source->set_mute)
2055             u->source->set_mute(u->source);
2056     } else {
2057         if (u->source->get_mute)
2058             u->source->get_mute(u->source);
2059     }
2060
2061     if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
2062         u->source->write_volume(u->source);
2063
2064     pa_source_put(u->source);
2065
2066     if (profile_set)
2067         pa_alsa_profile_set_free(profile_set);
2068
2069     return u->source;
2070
2071 fail:
2072
2073     if (u)
2074         userdata_free(u);
2075
2076     if (profile_set)
2077         pa_alsa_profile_set_free(profile_set);
2078
2079     return NULL;
2080 }
2081
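/* Tear down everything pa_alsa_source_new() set up, roughly in reverse order
 * of construction: unlink the source, stop the IO thread, close the PCM and
 * mixer handles and release the device reservation. */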
2082 static void userdata_free(struct userdata *u) {
2083     pa_assert(u);
2084
2085     if (u->source)
2086         pa_source_unlink(u->source);
2087
2088     if (u->thread) {
2089         pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2090         pa_thread_free(u->thread);
2091     }
2092
2093     pa_thread_mq_done(&u->thread_mq);
2094
2095     if (u->source)
2096         pa_source_unref(u->source);
2097
2098     if (u->mixer_pd)
2099         pa_alsa_mixer_pdata_free(u->mixer_pd);
2100
2101     if (u->alsa_rtpoll_item)
2102         pa_rtpoll_item_free(u->alsa_rtpoll_item);
2103
2104     if (u->rtpoll)
2105         pa_rtpoll_free(u->rtpoll);
2106
2107     if (u->pcm_handle) {
2108         snd_pcm_drop(u->pcm_handle);
2109         snd_pcm_close(u->pcm_handle);
2110     }
2111
2112     if (u->mixer_fdl)
2113         pa_alsa_fdlist_free(u->mixer_fdl);
2114
2115     if (u->mixer_path && !u->mixer_path_set)
2116         pa_alsa_path_free(u->mixer_path);
2117
2118     if (u->mixer_handle)
2119         snd_mixer_close(u->mixer_handle);
2120
2121     if (u->smoother)
2122         pa_smoother_free(u->smoother);
2123
2124     if (u->rates)
2125         pa_xfree(u->rates);
2126
2127     reserve_done(u);
2128     monitor_done(u);
2129
2130     pa_xfree(u->device_name);
2131     pa_xfree(u->control_device);
2132     pa_xfree(u->paths_dir);
2133     pa_xfree(u);
2134 }
2135
2136 void pa_alsa_source_free(pa_source *s) {
2137     struct userdata *u;
2138
2139     pa_source_assert_ref(s);
2140     pa_assert_se(u = s->userdata);
2141
2142     userdata_free(u);
2143 }