alsa: move pa_alsa_setting_select close to pa_alsa_path_select
[platform/upstream/pulseaudio.git] src/modules/alsa/alsa-source.c
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, write to the Free Software
19   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20   USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/volume.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/thread-mq.h>
50 #include <pulsecore/rtpoll.h>
51 #include <pulsecore/time-smoother.h>
52
53 #include <modules/reserve-wrap.h>
54
55 #include "alsa-util.h"
56 #include "alsa-source.h"
57
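/* This file implements the capture (source) half of PulseAudio's ALSA
 * backend. A source built from it is normally set up by module-alsa-card;
 * loading it directly looks roughly like this (illustrative only -- the
 * module arguments are parsed outside this file):
 *
 *   load-module module-alsa-source device=hw:0 tsched=1
 */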
58 /* #define DEBUG_TIMING */
59
60 #define DEFAULT_DEVICE "default"
61
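/* Tuning constants for timer-based scheduling (tsched). The wakeup watermark
 * controls how early we wake up before the hardware buffer would overrun: it
 * is raised by the *_INC_STEP value whenever we overrun or drop below the
 * *_INC_THRESHOLD of remaining space, and lowered again by the *_DEC_STEP
 * value once we have kept more than the *_DEC_THRESHOLD of headroom for the
 * *_VERIFY_AFTER period. */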
62 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
63 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
64
65 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
66 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
67 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
68 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
69 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
70 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
71
72 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
73 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
74
75 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
76 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
77
78 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
79 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
80
81 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
82
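/* Per-device instance state, shared between the main thread and the IO
 * thread that does the actual capturing. */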
83 struct userdata {
84     pa_core *core;
85     pa_module *module;
86     pa_source *source;
87
88     pa_thread *thread;
89     pa_thread_mq thread_mq;
90     pa_rtpoll *rtpoll;
91
92     snd_pcm_t *pcm_handle;
93
94     char *paths_dir;
95     pa_alsa_fdlist *mixer_fdl;
96     pa_alsa_mixer_pdata *mixer_pd;
97     snd_mixer_t *mixer_handle;
98     pa_alsa_path_set *mixer_path_set;
99     pa_alsa_path *mixer_path;
100
101     pa_cvolume hardware_volume;
102
103     unsigned int *rates;
104
105     size_t
106         frame_size,
107         fragment_size,
108         hwbuf_size,
109         tsched_watermark,
110         tsched_watermark_ref,
111         hwbuf_unused,
112         min_sleep,
113         min_wakeup,
114         watermark_inc_step,
115         watermark_dec_step,
116         watermark_inc_threshold,
117         watermark_dec_threshold;
118
119     pa_usec_t watermark_dec_not_before;
120     pa_usec_t min_latency_ref;
121
122     char *device_name;  /* name of the PCM device */
123     char *control_device; /* name of the control device */
124
125     pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
126
127     pa_bool_t first;
128
129     pa_rtpoll_item *alsa_rtpoll_item;
130
131     pa_smoother *smoother;
132     uint64_t read_count;
133     pa_usec_t smoother_interval;
134     pa_usec_t last_smoother_update;
135
136     pa_reserve_wrapper *reserve;
137     pa_hook_slot *reserve_slot;
138     pa_reserve_monitor_wrapper *monitor;
139     pa_hook_slot *monitor_slot;
140 };
141
142 static void userdata_free(struct userdata *u);
143
144 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
145     pa_assert(r);
146     pa_assert(u);
147
148     if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
149         return PA_HOOK_CANCEL;
150
151     return PA_HOOK_OK;
152 }
153
154 static void reserve_done(struct userdata *u) {
155     pa_assert(u);
156
157     if (u->reserve_slot) {
158         pa_hook_slot_free(u->reserve_slot);
159         u->reserve_slot = NULL;
160     }
161
162     if (u->reserve) {
163         pa_reserve_wrapper_unref(u->reserve);
164         u->reserve = NULL;
165     }
166 }
167
168 static void reserve_update(struct userdata *u) {
169     const char *description;
170     pa_assert(u);
171
172     if (!u->source || !u->reserve)
173         return;
174
175     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
176         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
177 }
178
179 static int reserve_init(struct userdata *u, const char *dname) {
180     char *rname;
181
182     pa_assert(u);
183     pa_assert(dname);
184
185     if (u->reserve)
186         return 0;
187
188     if (pa_in_system_mode())
189         return 0;
190
191     if (!(rname = pa_alsa_get_reserve_name(dname)))
192         return 0;
193
194     /* We are resuming, try to lock the device */
195     u->reserve = pa_reserve_wrapper_get(u->core, rname);
196     pa_xfree(rname);
197
198     if (!(u->reserve))
199         return -1;
200
201     reserve_update(u);
202
203     pa_assert(!u->reserve_slot);
204     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
205
206     return 0;
207 }
208
209 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
210     pa_bool_t b;
211
212     pa_assert(w);
213     pa_assert(u);
214
215     b = PA_PTR_TO_UINT(busy) && !u->reserve;
216
217     pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
218     return PA_HOOK_OK;
219 }
220
221 static void monitor_done(struct userdata *u) {
222     pa_assert(u);
223
224     if (u->monitor_slot) {
225         pa_hook_slot_free(u->monitor_slot);
226         u->monitor_slot = NULL;
227     }
228
229     if (u->monitor) {
230         pa_reserve_monitor_wrapper_unref(u->monitor);
231         u->monitor = NULL;
232     }
233 }
234
235 static int reserve_monitor_init(struct userdata *u, const char *dname) {
236     char *rname;
237
238     pa_assert(u);
239     pa_assert(dname);
240
241     if (pa_in_system_mode())
242         return 0;
243
244     if (!(rname = pa_alsa_get_reserve_name(dname)))
245         return 0;
246
247     /* Watch the device reservation so we notice when another application takes over the device */
248     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
249     pa_xfree(rname);
250
251     if (!(u->monitor))
252         return -1;
253
254     pa_assert(!u->monitor_slot);
255     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
256
257     return 0;
258 }
259
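/* Clamp the minimum sleep and wakeup times (TSCHED_MIN_SLEEP_USEC and
 * TSCHED_MIN_WAKEUP_USEC converted to bytes) to lie between one frame and
 * half of the usable part of the hardware buffer. */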
260 static void fix_min_sleep_wakeup(struct userdata *u) {
261     size_t max_use, max_use_2;
262
263     pa_assert(u);
264     pa_assert(u->use_tsched);
265
266     max_use = u->hwbuf_size - u->hwbuf_unused;
267     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
268
269     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
270     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
271
272     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
273     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
274 }
275
276 static void fix_tsched_watermark(struct userdata *u) {
277     size_t max_use;
278     pa_assert(u);
279     pa_assert(u->use_tsched);
280
281     max_use = u->hwbuf_size - u->hwbuf_unused;
282
283     if (u->tsched_watermark > max_use - u->min_sleep)
284         u->tsched_watermark = max_use - u->min_sleep;
285
286     if (u->tsched_watermark < u->min_wakeup)
287         u->tsched_watermark = u->min_wakeup;
288 }
289
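/* Grow the wakeup watermark after we came too close to an overrun: double
 * it, but by no more than watermark_inc_step. If it cannot grow any further
 * and the latency range is not fixed, raise the source's minimum latency
 * instead. */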
290 static void increase_watermark(struct userdata *u) {
291     size_t old_watermark;
292     pa_usec_t old_min_latency, new_min_latency;
293
294     pa_assert(u);
295     pa_assert(u->use_tsched);
296
297     /* First, just try to increase the watermark */
298     old_watermark = u->tsched_watermark;
299     u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
300     fix_tsched_watermark(u);
301
302     if (old_watermark != u->tsched_watermark) {
303         pa_log_info("Increasing wakeup watermark to %0.2f ms",
304                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
305         return;
306     }
307
308     /* Hmm, we cannot increase the watermark any further, hence let's
309      raise the latency unless doing so was disabled in
310      configuration */
311     if (u->fixed_latency_range)
312         return;
313
314     old_min_latency = u->source->thread_info.min_latency;
315     new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
316     new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
317
318     if (old_min_latency != new_min_latency) {
319         pa_log_info("Increasing minimal latency to %0.2f ms",
320                     (double) new_min_latency / PA_USEC_PER_MSEC);
321
322         pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
323     }
324
325     /* When we reach this we're officially fucked! */
326 }
327
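/* Shrink the wakeup watermark again, by at most watermark_dec_step, but only
 * once TSCHED_WATERMARK_VERIFY_AFTER_USEC has passed since the decrease was
 * scheduled, so that we do not oscillate right after an increase. */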
328 static void decrease_watermark(struct userdata *u) {
329     size_t old_watermark;
330     pa_usec_t now;
331
332     pa_assert(u);
333     pa_assert(u->use_tsched);
334
335     now = pa_rtclock_now();
336
337     if (u->watermark_dec_not_before <= 0)
338         goto restart;
339
340     if (u->watermark_dec_not_before > now)
341         return;
342
343     old_watermark = u->tsched_watermark;
344
345     if (u->tsched_watermark < u->watermark_dec_step)
346         u->tsched_watermark = u->tsched_watermark / 2;
347     else
348         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
349
350     fix_tsched_watermark(u);
351
352     if (old_watermark != u->tsched_watermark)
353         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
354                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
355
356     /* We don't change the latency range */
357
358 restart:
359     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
360 }
361
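/* Split the currently requested latency into the time we may sleep and the
 * time reserved for processing: the process time is the watermark (capped at
 * half the latency), the sleep time is the remainder. */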
362 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
363     pa_usec_t wm, usec;
364
365     pa_assert(sleep_usec);
366     pa_assert(process_usec);
367
368     pa_assert(u);
369     pa_assert(u->use_tsched);
370
371     usec = pa_source_get_requested_latency_within_thread(u->source);
372
373     if (usec == (pa_usec_t) -1)
374         usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
375
376     wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
377
378     if (wm > usec)
379         wm = usec/2;
380
381     *sleep_usec = usec - wm;
382     *process_usec = wm;
383
384 #ifdef DEBUG_TIMING
385     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
386                  (unsigned long) (usec / PA_USEC_PER_MSEC),
387                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
388                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
389 #endif
390 }
391
392 static int try_recover(struct userdata *u, const char *call, int err) {
393     pa_assert(u);
394     pa_assert(call);
395     pa_assert(err < 0);
396
397     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
398
399     pa_assert(err != -EAGAIN);
400
401     if (err == -EPIPE)
402         pa_log_debug("%s: Buffer overrun!", call);
403
404     if (err == -ESTRPIPE)
405         pa_log_debug("%s: System suspended!", call);
406
407     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
408         pa_log("%s: %s", call, pa_alsa_strerror(err));
409         return -1;
410     }
411
412     u->first = TRUE;
413     return 0;
414 }
415
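/* Given how many bytes ALSA reports as available, compute how much room is
 * left before the hardware buffer overruns, detect overruns, and adapt the
 * wakeup watermark: grow it on overruns or tight margins, shrink it on timer
 * wakeups that still had plenty of headroom. */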
416 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
417     size_t left_to_record;
418     size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
419     pa_bool_t overrun = FALSE;
420
421     /* We use <= instead of < for this check here because an overrun
422      * only happens after the last sample was processed, not already when
423      * it is removed from the buffer. This is particularly important
424      * when block transfer is used. */
425
426     if (n_bytes <= rec_space)
427         left_to_record = rec_space - n_bytes;
428     else {
429
430         /* We got a dropout. What a mess! */
431         left_to_record = 0;
432         overrun = TRUE;
433
434 #ifdef DEBUG_TIMING
435         PA_DEBUG_TRAP;
436 #endif
437
438         if (pa_log_ratelimit(PA_LOG_INFO))
439             pa_log_info("Overrun!");
440     }
441
442 #ifdef DEBUG_TIMING
443     pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
444 #endif
445
446     if (u->use_tsched) {
447         pa_bool_t reset_not_before = TRUE;
448
449         if (overrun || left_to_record < u->watermark_inc_threshold)
450             increase_watermark(u);
451         else if (left_to_record > u->watermark_dec_threshold) {
452             reset_not_before = FALSE;
453
454             /* We decrease the watermark only if we have actually
455              * been woken up by a timeout. If something else woke
456              * us up it's too easy to fulfill the deadlines... */
457
458             if (on_timeout)
459                 decrease_watermark(u);
460         }
461
462         if (reset_not_before)
463             u->watermark_dec_not_before = 0;
464     }
465
466     return left_to_record;
467 }
468
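/* Capture using ALSA's mmap interface: query the available frames, map the
 * hardware buffer with snd_pcm_mmap_begin(), post the mapped area to the
 * source as read-only fixed memblocks, then release it with
 * snd_pcm_mmap_commit(). Returns 1 if data was posted, 0 if there was
 * nothing to do, negative on unrecoverable errors. */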
469 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
470     pa_bool_t work_done = FALSE;
471     pa_usec_t max_sleep_usec = 0, process_usec = 0;
472     size_t left_to_record;
473     unsigned j = 0;
474
475     pa_assert(u);
476     pa_source_assert_ref(u->source);
477
478     if (u->use_tsched)
479         hw_sleep_time(u, &max_sleep_usec, &process_usec);
480
481     for (;;) {
482         snd_pcm_sframes_t n;
483         size_t n_bytes;
484         int r;
485         pa_bool_t after_avail = TRUE;
486
487         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
488
489             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
490                 continue;
491
492             return r;
493         }
494
495         n_bytes = (size_t) n * u->frame_size;
496
497 #ifdef DEBUG_TIMING
498         pa_log_debug("avail: %lu", (unsigned long) n_bytes);
499 #endif
500
501         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
502         on_timeout = FALSE;
503
504         if (u->use_tsched)
505             if (!polled &&
506                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
507 #ifdef DEBUG_TIMING
508                 pa_log_debug("Not reading, because too early.");
509 #endif
510                 break;
511             }
512
513         if (PA_UNLIKELY(n_bytes <= 0)) {
514
515             if (polled)
516                 PA_ONCE_BEGIN {
517                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
518                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
519                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
520                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
521                            pa_strnull(dn));
522                     pa_xfree(dn);
523                 } PA_ONCE_END;
524
525 #ifdef DEBUG_TIMING
526             pa_log_debug("Not reading, because not necessary.");
527 #endif
528             break;
529         }
530
531
532         if (++j > 10) {
533 #ifdef DEBUG_TIMING
534             pa_log_debug("Not filling up, because already too many iterations.");
535 #endif
536
537             break;
538         }
539
540         polled = FALSE;
541
542 #ifdef DEBUG_TIMING
543         pa_log_debug("Reading");
544 #endif
545
546         for (;;) {
547             pa_memchunk chunk;
548             void *p;
549             int err;
550             const snd_pcm_channel_area_t *areas;
551             snd_pcm_uframes_t offset, frames;
552             snd_pcm_sframes_t sframes;
553
554             frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
555 /*             pa_log_debug("%lu frames to read", (unsigned long) frames); */
556
557             if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
558
559                 if (!after_avail && err == -EAGAIN)
560                     break;
561
562                 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
563                     continue;
564
565                 return r;
566             }
567
568             /* Make sure that if these memblocks need to be copied they will fit into one slot */
569             if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
570                 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
571
572             if (!after_avail && frames == 0)
573                 break;
574
575             pa_assert(frames > 0);
576             after_avail = FALSE;
577
578             /* Check these are multiples of 8 bit */
579             pa_assert((areas[0].first & 7) == 0);
580             pa_assert((areas[0].step & 7) == 0);
581
582             /* We assume a single interleaved memory buffer */
583             pa_assert((areas[0].first >> 3) == 0);
584             pa_assert((areas[0].step >> 3) == u->frame_size);
585
586             p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
587
588             chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
589             chunk.length = pa_memblock_get_length(chunk.memblock);
590             chunk.index = 0;
591
592             pa_source_post(u->source, &chunk);
593             pa_memblock_unref_fixed(chunk.memblock);
594
595             if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
596
597                 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
598                     continue;
599
600                 return r;
601             }
602
603             work_done = TRUE;
604
605             u->read_count += frames * u->frame_size;
606
607 #ifdef DEBUG_TIMING
608             pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
609 #endif
610
611             if ((size_t) frames * u->frame_size >= n_bytes)
612                 break;
613
614             n_bytes -= (size_t) frames * u->frame_size;
615         }
616     }
617
618     if (u->use_tsched) {
619         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
620         process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
621
622         if (*sleep_usec > process_usec)
623             *sleep_usec -= process_usec;
624         else
625             *sleep_usec = 0;
626     }
627
628     return work_done ? 1 : 0;
629 }
630
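/* Fallback capture path for setups where mmap access is not used: allocate a
 * memblock, fill it with snd_pcm_readi() and post it to the source. Same
 * return convention as mmap_read(). */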
631 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
632     int work_done = FALSE;
633     pa_usec_t max_sleep_usec = 0, process_usec = 0;
634     size_t left_to_record;
635     unsigned j = 0;
636
637     pa_assert(u);
638     pa_source_assert_ref(u->source);
639
640     if (u->use_tsched)
641         hw_sleep_time(u, &max_sleep_usec, &process_usec);
642
643     for (;;) {
644         snd_pcm_sframes_t n;
645         size_t n_bytes;
646         int r;
647         pa_bool_t after_avail = TRUE;
648
649         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
650
651             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
652                 continue;
653
654             return r;
655         }
656
657         n_bytes = (size_t) n * u->frame_size;
658         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
659         on_timeout = FALSE;
660
661         if (u->use_tsched)
662             if (!polled &&
663                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
664                 break;
665
666         if (PA_UNLIKELY(n_bytes <= 0)) {
667
668             if (polled)
669                 PA_ONCE_BEGIN {
670                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
671                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
672                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
673                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
674                            pa_strnull(dn));
675                     pa_xfree(dn);
676                 } PA_ONCE_END;
677
678             break;
679         }
680
681         if (++j > 10) {
682 #ifdef DEBUG_TIMING
683             pa_log_debug("Not filling up, because already too many iterations.");
684 #endif
685
686             break;
687         }
688
689         polled = FALSE;
690
691         for (;;) {
692             void *p;
693             snd_pcm_sframes_t frames;
694             pa_memchunk chunk;
695
696             chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
697
698             frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
699
700             if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
701                 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
702
703 /*             pa_log_debug("%lu frames to read", (unsigned long) n); */
704
705             p = pa_memblock_acquire(chunk.memblock);
706             frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
707             pa_memblock_release(chunk.memblock);
708
709             if (PA_UNLIKELY(frames < 0)) {
710                 pa_memblock_unref(chunk.memblock);
711
712                 if (!after_avail && (int) frames == -EAGAIN)
713                     break;
714
715                 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
716                     continue;
717
718                 return r;
719             }
720
721             if (!after_avail && frames == 0) {
722                 pa_memblock_unref(chunk.memblock);
723                 break;
724             }
725
726             pa_assert(frames > 0);
727             after_avail = FALSE;
728
729             chunk.index = 0;
730             chunk.length = (size_t) frames * u->frame_size;
731
732             pa_source_post(u->source, &chunk);
733             pa_memblock_unref(chunk.memblock);
734
735             work_done = TRUE;
736
737             u->read_count += frames * u->frame_size;
738
739 /*             pa_log_debug("read %lu frames", (unsigned long) frames); */
740
741             if ((size_t) frames * u->frame_size >= n_bytes)
742                 break;
743
744             n_bytes -= (size_t) frames * u->frame_size;
745         }
746     }
747
748     if (u->use_tsched) {
749         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
750         process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
751
752         if (*sleep_usec > process_usec)
753             *sleep_usec -= process_usec;
754         else
755             *sleep_usec = 0;
756     }
757
758     return work_done ? 1 : 0;
759 }
760
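/* Feed a new (system time, stream time) sample into the time smoother. The
 * stream time is derived from the total number of bytes read plus the
 * current hardware delay; the system time preferably comes from the PCM
 * status htimestamp, falling back to the rtclock. Updates are rate limited
 * by smoother_interval, which grows exponentially up to
 * SMOOTHER_MAX_INTERVAL. */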
761 static void update_smoother(struct userdata *u) {
762     snd_pcm_sframes_t delay = 0;
763     uint64_t position;
764     int err;
765     pa_usec_t now1 = 0, now2;
766     snd_pcm_status_t *status;
767
768     snd_pcm_status_alloca(&status);
769
770     pa_assert(u);
771     pa_assert(u->pcm_handle);
772
773     /* Let's update the time smoother */
774
775     if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
776         pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
777         return;
778     }
779
780     if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
781         pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
782     else {
783         snd_htimestamp_t htstamp = { 0, 0 };
784         snd_pcm_status_get_htstamp(status, &htstamp);
785         now1 = pa_timespec_load(&htstamp);
786     }
787
788     /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
789     if (now1 <= 0)
790         now1 = pa_rtclock_now();
791
792     /* check if the time since the last update is bigger than the interval */
793     if (u->last_smoother_update > 0)
794         if (u->last_smoother_update + u->smoother_interval > now1)
795             return;
796
797     position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
798     now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
799
800     pa_smoother_put(u->smoother, now1, now2);
801
802     u->last_smoother_update = now1;
803     /* exponentially increase the update interval up to the MAX limit */
804     u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
805 }
806
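/* Estimate the current latency: the smoother predicts how much stream time
 * the device has captured by now; subtracting what we have already read
 * gives the amount still sitting in the hardware buffer. */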
807 static pa_usec_t source_get_latency(struct userdata *u) {
808     int64_t delay;
809     pa_usec_t now1, now2;
810
811     pa_assert(u);
812
813     now1 = pa_rtclock_now();
814     now2 = pa_smoother_get(u->smoother, now1);
815
816     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
817
818     return delay >= 0 ? (pa_usec_t) delay : 0;
819 }
820
821 static int build_pollfd(struct userdata *u) {
822     pa_assert(u);
823     pa_assert(u->pcm_handle);
824
825     if (u->alsa_rtpoll_item)
826         pa_rtpoll_item_free(u->alsa_rtpoll_item);
827
828     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
829         return -1;
830
831     return 0;
832 }
833
834 /* Called from IO context */
835 static int suspend(struct userdata *u) {
836     pa_assert(u);
837     pa_assert(u->pcm_handle);
838
839     pa_smoother_pause(u->smoother, pa_rtclock_now());
840
841     /* Let's suspend */
842     snd_pcm_close(u->pcm_handle);
843     u->pcm_handle = NULL;
844
845     if (u->alsa_rtpoll_item) {
846         pa_rtpoll_item_free(u->alsa_rtpoll_item);
847         u->alsa_rtpoll_item = NULL;
848     }
849
850     pa_log_info("Device suspended...");
851
852     return 0;
853 }
854
855 /* Called from IO context */
856 static int update_sw_params(struct userdata *u) {
857     snd_pcm_uframes_t avail_min;
858     int err;
859
860     pa_assert(u);
861
862     /* Use the full buffer if no one asked us for anything specific */
863     u->hwbuf_unused = 0;
864
865     if (u->use_tsched) {
866         pa_usec_t latency;
867
868         if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
869             size_t b;
870
871             pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
872
873             b = pa_usec_to_bytes(latency, &u->source->sample_spec);
874
875             /* We need at least one sample in our buffer */
876
877             if (PA_UNLIKELY(b < u->frame_size))
878                 b = u->frame_size;
879
880             u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
881         }
882
883         fix_min_sleep_wakeup(u);
884         fix_tsched_watermark(u);
885     }
886
887     pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
888
889     avail_min = 1;
890
891     if (u->use_tsched) {
892         pa_usec_t sleep_usec, process_usec;
893
894         hw_sleep_time(u, &sleep_usec, &process_usec);
895         avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
896     }
897
898     pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
899
900     if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
901         pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
902         return err;
903     }
904
905     return 0;
906 }
907
908 /* Called from IO Context on unsuspend or from main thread when creating source */
909 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
910                             pa_bool_t in_thread)
911 {
912     u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
913                                                     &u->source->sample_spec);
914
915     u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
916     u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
917
918     u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
919     u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
920
921     fix_min_sleep_wakeup(u);
922     fix_tsched_watermark(u);
923
924     if (in_thread)
925         pa_source_set_latency_range_within_thread(u->source,
926                                                   u->min_latency_ref,
927                                                   pa_bytes_to_usec(u->hwbuf_size, ss));
928     else {
929         pa_source_set_latency_range(u->source,
930                                     0,
931                                     pa_bytes_to_usec(u->hwbuf_size, ss));
932
933         /* work around the assert in pa_source_set_latency_range_within_thread,
934            keep track of min_latency and reuse it when
935            this routine is called from IO context */
936         u->min_latency_ref = u->source->thread_info.min_latency;
937     }
938
939     pa_log_info("Time scheduling watermark is %0.2fms",
940                 (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
941 }
942
943 /* Called from IO context */
944 static int unsuspend(struct userdata *u) {
945     pa_sample_spec ss;
946     int err;
947     pa_bool_t b, d;
948     snd_pcm_uframes_t period_size, buffer_size;
949
950     pa_assert(u);
951     pa_assert(!u->pcm_handle);
952
953     pa_log_info("Trying resume...");
954
955     if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
956                             SND_PCM_NONBLOCK|
957                             SND_PCM_NO_AUTO_RESAMPLE|
958                             SND_PCM_NO_AUTO_CHANNELS|
959                             SND_PCM_NO_AUTO_FORMAT)) < 0) {
960         pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
961         goto fail;
962     }
963
964     ss = u->source->sample_spec;
965     period_size = u->fragment_size / u->frame_size;
966     buffer_size = u->hwbuf_size / u->frame_size;
967     b = u->use_mmap;
968     d = u->use_tsched;
969
970     if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
971         pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
972         goto fail;
973     }
974
975     if (b != u->use_mmap || d != u->use_tsched) {
976         pa_log_warn("Resume failed, couldn't get original access mode.");
977         goto fail;
978     }
979
980     if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
981         pa_log_warn("Resume failed, couldn't restore original sample settings.");
982         goto fail;
983     }
984
985     if (period_size*u->frame_size != u->fragment_size ||
986         buffer_size*u->frame_size != u->hwbuf_size) {
987         pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
988                     (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
989                     (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
990         goto fail;
991     }
992
993     if (update_sw_params(u) < 0)
994         goto fail;
995
996     if (build_pollfd(u) < 0)
997         goto fail;
998
999     /* FIXME: We need to reload the volume somehow */
1000
1001     u->read_count = 0;
1002     pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1003     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1004     u->last_smoother_update = 0;
1005
1006     u->first = TRUE;
1007
1008     /* reset the watermark to the value defined when source was created */
1009     if (u->use_tsched)
1010         reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, TRUE);
1011
1012     pa_log_info("Resumed successfully...");
1013
1014     return 0;
1015
1016 fail:
1017     if (u->pcm_handle) {
1018         snd_pcm_close(u->pcm_handle);
1019         u->pcm_handle = NULL;
1020     }
1021
1022     return -PA_ERR_IO;
1023 }
1024
1025 /* Called from IO context */
1026 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1027     struct userdata *u = PA_SOURCE(o)->userdata;
1028
1029     switch (code) {
1030
1031         case PA_SOURCE_MESSAGE_GET_LATENCY: {
1032             pa_usec_t r = 0;
1033
1034             if (u->pcm_handle)
1035                 r = source_get_latency(u);
1036
1037             *((pa_usec_t*) data) = r;
1038
1039             return 0;
1040         }
1041
1042         case PA_SOURCE_MESSAGE_SET_STATE:
1043
1044             switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
1045
1046                 case PA_SOURCE_SUSPENDED: {
1047                     int r;
1048
1049                     pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1050
1051                     if ((r = suspend(u)) < 0)
1052                         return r;
1053
1054                     break;
1055                 }
1056
1057                 case PA_SOURCE_IDLE:
1058                 case PA_SOURCE_RUNNING: {
1059                     int r;
1060
1061                     if (u->source->thread_info.state == PA_SOURCE_INIT) {
1062                         if (build_pollfd(u) < 0)
1063                             return -PA_ERR_IO;
1064                     }
1065
1066                     if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1067                         if ((r = unsuspend(u)) < 0)
1068                             return r;
1069                     }
1070
1071                     break;
1072                 }
1073
1074                 case PA_SOURCE_UNLINKED:
1075                 case PA_SOURCE_INIT:
1076                 case PA_SOURCE_INVALID_STATE:
1077                     ;
1078             }
1079
1080             break;
1081     }
1082
1083     return pa_source_process_msg(o, code, data, offset, chunk);
1084 }
1085
1086 /* Called from main context */
1087 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1088     pa_source_state_t old_state;
1089     struct userdata *u;
1090
1091     pa_source_assert_ref(s);
1092     pa_assert_se(u = s->userdata);
1093
1094     old_state = pa_source_get_state(u->source);
1095
1096     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1097         reserve_done(u);
1098     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1099         if (reserve_init(u, u->device_name) < 0)
1100             return -PA_ERR_BUSY;
1101
1102     return 0;
1103 }
1104
1105 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1106     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1107
1108     pa_assert(u);
1109     pa_assert(u->mixer_handle);
1110
1111     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1112         return 0;
1113
1114     if (!PA_SOURCE_IS_LINKED(u->source->state))
1115         return 0;
1116
1117     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1118         pa_source_set_mixer_dirty(u->source, TRUE);
1119         return 0;
1120     }
1121
1122     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1123         pa_source_get_volume(u->source, TRUE);
1124         pa_source_get_mute(u->source, TRUE);
1125     }
1126
1127     return 0;
1128 }
1129
1130 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1131     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1132
1133     pa_assert(u);
1134     pa_assert(u->mixer_handle);
1135
1136     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1137         return 0;
1138
1139     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1140         pa_source_set_mixer_dirty(u->source, TRUE);
1141         return 0;
1142     }
1143
1144     if (mask & SND_CTL_EVENT_MASK_VALUE)
1145         pa_source_update_volume_and_mute(u->source);
1146
1147     return 0;
1148 }
1149
1150 static void source_get_volume_cb(pa_source *s) {
1151     struct userdata *u = s->userdata;
1152     pa_cvolume r;
1153     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1154
1155     pa_assert(u);
1156     pa_assert(u->mixer_path);
1157     pa_assert(u->mixer_handle);
1158
1159     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1160         return;
1161
1162     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1163     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1164
1165     pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1166
1167     if (u->mixer_path->has_dB) {
1168         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1169
1170         pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1171     }
1172
1173     if (pa_cvolume_equal(&u->hardware_volume, &r))
1174         return;
1175
1176     s->real_volume = u->hardware_volume = r;
1177
1178     /* Hmm, so the hardware volume changed, let's reset our software volume */
1179     if (u->mixer_path->has_dB)
1180         pa_source_set_soft_volume(s, NULL);
1181 }
1182
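/* Apply s->real_volume to the hardware as closely as the mixer path allows.
 * If the path has a dB scale, the remaining difference between the request
 * and what the hardware accepted becomes the software volume, unless that
 * difference is within VOLUME_ACCURACY of unity, in which case we skip it to
 * save CPU. */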
1183 static void source_set_volume_cb(pa_source *s) {
1184     struct userdata *u = s->userdata;
1185     pa_cvolume r;
1186     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1187     pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1188
1189     pa_assert(u);
1190     pa_assert(u->mixer_path);
1191     pa_assert(u->mixer_handle);
1192
1193     /* Shift up by the base volume */
1194     pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1195
1196     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1197         return;
1198
1199     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1200     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1201
1202     u->hardware_volume = r;
1203
1204     if (u->mixer_path->has_dB) {
1205         pa_cvolume new_soft_volume;
1206         pa_bool_t accurate_enough;
1207         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1208
1209         /* Match exactly what the user requested by software */
1210         pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1211
1212         /* If the adjustment to do in software is only minimal we
1213          * can skip it. That saves us CPU at the expense of a bit of
1214          * accuracy */
1215         accurate_enough =
1216             (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1217             (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1218
1219         pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1220         pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1221         pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1222         pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1223         pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1224                      pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1225                      pa_yes_no(accurate_enough));
1226         pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1227
1228         if (!accurate_enough)
1229             s->soft_volume = new_soft_volume;
1230
1231     } else {
1232         pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1233
1234         /* We can't match exactly what the user requested, hence let's
1235          * at least tell the user about it */
1236
1237         s->real_volume = r;
1238     }
1239 }
1240
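/* Deferred-volume variant of the volume writer, used from the IO thread:
 * write thread_info.current_hw_volume to the hardware and log a debug
 * message if the result deviates from the request by more than
 * VOLUME_ACCURACY. */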
1241 static void source_write_volume_cb(pa_source *s) {
1242     struct userdata *u = s->userdata;
1243     pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1244
1245     pa_assert(u);
1246     pa_assert(u->mixer_path);
1247     pa_assert(u->mixer_handle);
1248     pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1249
1250     /* Shift up by the base volume */
1251     pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1252
1253     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1254         pa_log_error("Writing HW volume failed");
1255     else {
1256         pa_cvolume tmp_vol;
1257         pa_bool_t accurate_enough;
1258
1259         /* Shift down by the base volume, so that 0dB becomes maximum volume */
1260         pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1261
1262         pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1263         accurate_enough =
1264             (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1265             (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1266
1267         if (!accurate_enough) {
1268             union {
1269                 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1270                 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1271             } vol;
1272
1273             pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1274                          pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1275                          pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1276             pa_log_debug("                                           in dB: %s (request) != %s",
1277                          pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1278                          pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1279         }
1280     }
1281 }
1282
1283 static void source_get_mute_cb(pa_source *s) {
1284     struct userdata *u = s->userdata;
1285     pa_bool_t b;
1286
1287     pa_assert(u);
1288     pa_assert(u->mixer_path);
1289     pa_assert(u->mixer_handle);
1290
1291     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1292         return;
1293
1294     s->muted = b;
1295 }
1296
1297 static void source_set_mute_cb(pa_source *s) {
1298     struct userdata *u = s->userdata;
1299
1300     pa_assert(u);
1301     pa_assert(u->mixer_path);
1302     pa_assert(u->mixer_handle);
1303
1304     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1305 }
1306
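/* Hook up volume and mute callbacks according to what the probed mixer path
 * supports: hardware volume with or without a dB scale (optionally written
 * in deferred mode) and hardware mute, falling back to software handling
 * otherwise. */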
1307 static void mixer_volume_init(struct userdata *u) {
1308     pa_assert(u);
1309
1310     if (!u->mixer_path->has_volume) {
1311         pa_source_set_write_volume_callback(u->source, NULL);
1312         pa_source_set_get_volume_callback(u->source, NULL);
1313         pa_source_set_set_volume_callback(u->source, NULL);
1314
1315         pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1316     } else {
1317         pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1318         pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1319
1320         if (u->mixer_path->has_dB && u->deferred_volume) {
1321             pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1322             pa_log_info("Successfully enabled deferred volume.");
1323         } else
1324             pa_source_set_write_volume_callback(u->source, NULL);
1325
1326         if (u->mixer_path->has_dB) {
1327             pa_source_enable_decibel_volume(u->source, TRUE);
1328             pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1329
1330             u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1331             u->source->n_volume_steps = PA_VOLUME_NORM+1;
1332
1333             pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1334         } else {
1335             pa_source_enable_decibel_volume(u->source, FALSE);
1336             pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1337
1338             u->source->base_volume = PA_VOLUME_NORM;
1339             u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1340         }
1341
1342         pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1343     }
1344
1345     if (!u->mixer_path->has_mute) {
1346         pa_source_set_get_mute_callback(u->source, NULL);
1347         pa_source_set_set_mute_callback(u->source, NULL);
1348         pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1349     } else {
1350         pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1351         pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1352         pa_log_info("Using hardware mute control.");
1353     }
1354 }
1355
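/* Switch to another port: select the mixer path stored in the port data (and
 * its setting, if any), re-initialize the volume/mute callbacks and push the
 * current mute and volume out through the newly selected path. */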
1356 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1357     struct userdata *u = s->userdata;
1358     pa_alsa_port_data *data;
1359
1360     pa_assert(u);
1361     pa_assert(p);
1362     pa_assert(u->mixer_handle);
1363
1364     data = PA_DEVICE_PORT_DATA(p);
1365
1366     pa_assert_se(u->mixer_path = data->path);
1367     pa_alsa_path_select(u->mixer_path, u->mixer_handle, s->muted);
1368
1369     if (data->setting)
1370         pa_alsa_setting_select(data->setting, u->mixer_handle);
1371
1372     mixer_volume_init(u);
1373
1374     if (s->set_mute)
1375         s->set_mute(s);
1376     if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1377         if (s->write_volume)
1378             s->write_volume(s);
1379     } else {
1380         if (s->set_volume)
1381             s->set_volume(s);
1382     }
1383
1384     return 0;
1385 }
1386
1387 static void source_update_requested_latency_cb(pa_source *s) {
1388     struct userdata *u = s->userdata;
1389     pa_assert(u);
1390     pa_assert(u->use_tsched); /* only when timer scheduling is used
1391                                * can we dynamically adjust the
1392                                * latency */
1393
1394     if (!u->pcm_handle)
1395         return;
1396
1397     update_sw_params(u);
1398 }
1399
1400 static pa_bool_t source_update_rate_cb(pa_source *s, uint32_t rate)
1401 {
1402     struct userdata *u = s->userdata;
1403     int i;
1404     pa_bool_t supported = FALSE;
1405
1406     pa_assert(u);
1407
1408     for (i = 0; u->rates[i]; i++) {
1409         if (u->rates[i] == rate) {
1410             supported = TRUE;
1411             break;
1412         }
1413     }
1414
1415     if (!supported) {
1416         pa_log_info("Source does not support sample rate of %d Hz", rate);
1417         return FALSE;
1418     }
1419
1420     if (!PA_SOURCE_IS_OPENED(s->state)) {
1421         pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1422         u->source->sample_spec.rate = rate;
1423         return TRUE;
1424     }
1425
1426     return FALSE;
1427 }
1428
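/* The IO thread: whenever the source is opened, read from the device, update
 * the smoother, and program the rtpoll timer for the next wakeup (also
 * taking pending deferred volume changes into account); otherwise just wait
 * on the ALSA poll descriptors and recover from error events. */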
1429 static void thread_func(void *userdata) {
1430     struct userdata *u = userdata;
1431     unsigned short revents = 0;
1432
1433     pa_assert(u);
1434
1435     pa_log_debug("Thread starting up");
1436
1437     if (u->core->realtime_scheduling)
1438         pa_make_realtime(u->core->realtime_priority);
1439
1440     pa_thread_mq_install(&u->thread_mq);
1441
1442     for (;;) {
1443         int ret;
1444         pa_usec_t rtpoll_sleep = 0;
1445
1446 #ifdef DEBUG_TIMING
1447         pa_log_debug("Loop");
1448 #endif
1449
1450         /* Read some data and pass it to the sources */
1451         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1452             int work_done;
1453             pa_usec_t sleep_usec = 0;
1454             pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1455
1456             if (u->first) {
1457                 pa_log_info("Starting capture.");
1458                 snd_pcm_start(u->pcm_handle);
1459
1460                 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1461
1462                 u->first = FALSE;
1463             }
1464
1465             if (u->use_mmap)
1466                 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1467             else
1468                 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1469
1470             if (work_done < 0)
1471                 goto fail;
1472
1473 /*             pa_log_debug("work_done = %i", work_done); */
1474
1475             if (work_done)
1476                 update_smoother(u);
1477
1478             if (u->use_tsched) {
1479                 pa_usec_t cusec;
1480
1481                 /* OK, the capture buffer is now empty, let's
1482                  * calculate when to wake up next */
1483
1484 /*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1485
1486                 /* Convert from the sound card time domain to the
1487                  * system time domain */
1488                 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1489
1490 /*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1491
1492                 /* We don't trust the conversion, so we wake up whatever comes first */
1493                 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1494             }
1495         }
1496
1497         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1498             pa_usec_t volume_sleep;
1499             pa_source_volume_change_apply(u->source, &volume_sleep);
1500             if (volume_sleep > 0) {
1501                 if (rtpoll_sleep > 0)
1502                     rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1503                 else
1504                     rtpoll_sleep = volume_sleep;
1505             }
1506         }
1507
1508         if (rtpoll_sleep > 0)
1509             pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1510         else
1511             pa_rtpoll_set_timer_disabled(u->rtpoll);
1512
1513         /* Hmm, nothing to do. Let's sleep */
1514         if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1515             goto fail;
1516
1517         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
1518             pa_source_volume_change_apply(u->source, NULL);
1519
1520         if (ret == 0)
1521             goto finish;
1522
1523         /* Tell ALSA about this and process its response */
1524         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1525             struct pollfd *pollfd;
1526             int err;
1527             unsigned n;
1528
1529             pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1530
1531             if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1532                 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1533                 goto fail;
1534             }
1535
1536             if (revents & ~POLLIN) {
1537                 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1538                     goto fail;
1539
1540                 u->first = TRUE;
1541                 revents = 0;
1542             } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1543                 pa_log_debug("Wakeup from ALSA!");
1544
1545         } else
1546             revents = 0;
1547     }
1548
1549 fail:
1550     /* If this was not a regular exit from the loop we have to continue
1551      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1552     pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1553     pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1554
1555 finish:
1556     pa_log_debug("Thread shutting down");
1557 }
1558
1559 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1560     const char *n;
1561     char *t;
1562
1563     pa_assert(data);
1564     pa_assert(ma);
1565     pa_assert(device_name);
1566
1567     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1568         pa_source_new_data_set_name(data, n);
1569         data->namereg_fail = TRUE;
1570         return;
1571     }
1572
1573     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1574         data->namereg_fail = TRUE;
1575     else {
1576         n = device_id ? device_id : device_name;
1577         data->namereg_fail = FALSE;
1578     }
1579
1580     if (mapping)
1581         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1582     else
1583         t = pa_sprintf_malloc("alsa_input.%s", n);
1584
1585     pa_source_new_data_set_name(data, t);
1586     pa_xfree(t);
1587 }
1588
1589 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1590     snd_hctl_t *hctl;
1591
1592     if (!mapping && !element)
1593         return;
1594
1595     if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1596         pa_log_info("Failed to find a working mixer device.");
1597         return;
1598     }
1599
1600     if (element) {
1601
1602         if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1603             goto fail;
1604
1605         if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1606             goto fail;
1607
1608         pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1609         pa_alsa_path_dump(u->mixer_path);
1610     } else if (!(u->mixer_path_set = mapping->input_path_set))
1611         goto fail;
1612
1613     return;
1614
1615 fail:
1616
1617     if (u->mixer_path) {
1618         pa_alsa_path_free(u->mixer_path);
1619         u->mixer_path = NULL;
1620     }
1621
1622     if (u->mixer_handle) {
1623         snd_mixer_close(u->mixer_handle);
1624         u->mixer_handle = NULL;
1625     }
1626 }
1627
1628 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1629     pa_bool_t need_mixer_callback = FALSE;
1630
1631     pa_assert(u);
1632
1633     if (!u->mixer_handle)
1634         return 0;
1635
1636     if (u->source->active_port) {
1637         pa_alsa_port_data *data;
1638
1639         /* We have a list of supported paths, so let's activate the
1640          * one that has been chosen as active */
1641
1642         data = PA_DEVICE_PORT_DATA(u->source->active_port);
1643         u->mixer_path = data->path;
1644
1645         pa_alsa_path_select(data->path, u->mixer_handle, u->source->muted);
1646
1647         if (data->setting)
1648             pa_alsa_setting_select(data->setting, u->mixer_handle);
1649
1650     } else {
1651
1652         if (!u->mixer_path && u->mixer_path_set)
1653             u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1654
1655         if (u->mixer_path) {
1656             /* We have only a single path, so let's activate it */
1657
1658             pa_alsa_path_select(u->mixer_path, u->mixer_handle, u->source->muted);
1659
1660             if (u->mixer_path->settings)
1661                 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1662         } else
1663             return 0;
1664     }
1665
1666     mixer_volume_init(u);
1667
1668     /* Will we need to register callbacks? */
1669     if (u->mixer_path_set && u->mixer_path_set->paths) {
1670         pa_alsa_path *p;
1671         void *state;
1672
1673         PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1674             if (p->has_volume || p->has_mute)
1675                 need_mixer_callback = TRUE;
1676         }
1677     }
1678     else if (u->mixer_path)
1679         need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1680
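    /* With deferred volume the mixer is polled from the IO thread via the
     * rtpoll; otherwise the fd list is watched from the main loop */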
1681     if (need_mixer_callback) {
1682         int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1683         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1684             u->mixer_pd = pa_alsa_mixer_pdata_new();
1685             mixer_callback = io_mixer_callback;
1686
1687             if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1688                 pa_log("Failed to initialize file descriptor monitoring");
1689                 return -1;
1690             }
1691         } else {
1692             u->mixer_fdl = pa_alsa_fdlist_new();
1693             mixer_callback = ctl_mixer_callback;
1694
1695             if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1696                 pa_log("Failed to initialize file descriptor monitoring");
1697                 return -1;
1698             }
1699         }
1700
1701         if (u->mixer_path_set)
1702             pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1703         else
1704             pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1705     }
1706
1707     return 0;
1708 }
1709
1710 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1711
1712     struct userdata *u = NULL;
1713     const char *dev_id = NULL;
1714     pa_sample_spec ss;
1715     uint32_t alternate_sample_rate;
1716     pa_channel_map map;
1717     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1718     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1719     size_t frame_size;
1720     pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, fixed_latency_range = FALSE;
1721     pa_source_new_data data;
1722     pa_alsa_profile_set *profile_set = NULL;
1723
1724     pa_assert(m);
1725     pa_assert(ma);
1726
1727     ss = m->core->default_sample_spec;
1728     map = m->core->default_channel_map;
1729     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1730         pa_log("Failed to parse sample specification and channel map");
1731         goto fail;
1732     }
1733
1734     alternate_sample_rate = m->core->alternate_sample_rate;
1735     if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1736         pa_log("Failed to parse alternate sample rate");
1737         goto fail;
1738     }
1739
1740     frame_size = pa_frame_size(&ss);
1741
1742     nfrags = m->core->default_n_fragments;
1743     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1744     if (frag_size <= 0)
1745         frag_size = (uint32_t) frame_size;
1746     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1747     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1748
1749     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1750         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1751         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1752         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1753         pa_log("Failed to parse buffer metrics");
1754         goto fail;
1755     }
1756
1757     buffer_size = nfrags * frag_size;
1758
1759     period_frames = frag_size/frame_size;
1760     buffer_frames = buffer_size/frame_size;
1761     tsched_frames = tsched_size/frame_size;
1762
1763     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1764         pa_log("Failed to parse mmap argument.");
1765         goto fail;
1766     }
1767
1768     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1769         pa_log("Failed to parse tsched argument.");
1770         goto fail;
1771     }
1772
1773     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1774         pa_log("Failed to parse ignore_dB argument.");
1775         goto fail;
1776     }
1777
1778     deferred_volume = m->core->deferred_volume;
1779     if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1780         pa_log("Failed to parse deferred_volume argument.");
1781         goto fail;
1782     }
1783
1784     if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
1785         pa_log("Failed to parse fixed_latency_range argument.");
1786         goto fail;
1787     }
1788
1789     use_tsched = pa_alsa_may_tsched(use_tsched);
1790
1791     u = pa_xnew0(struct userdata, 1);
1792     u->core = m->core;
1793     u->module = m;
1794     u->use_mmap = use_mmap;
1795     u->use_tsched = use_tsched;
1796     u->deferred_volume = deferred_volume;
1797     u->fixed_latency_range = fixed_latency_range;
1798     u->first = TRUE;
1799     u->rtpoll = pa_rtpoll_new();
1800     pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1801
1802     u->smoother = pa_smoother_new(
1803             SMOOTHER_ADJUST_USEC,
1804             SMOOTHER_WINDOW_USEC,
1805             TRUE,
1806             TRUE,
1807             5,
1808             pa_rtclock_now(),
1809             TRUE);
1810     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1811
1812     dev_id = pa_modargs_get_value(
1813             ma, "device_id",
1814             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1815
1816     u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
1817
1818     if (reserve_init(u, dev_id) < 0)
1819         goto fail;
1820
1821     if (reserve_monitor_init(u, dev_id) < 0)
1822         goto fail;
1823
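    /* b and d report back whether mmap and timer-based scheduling could
     * actually be configured; the open helpers clear them on failure */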
1824     b = use_mmap;
1825     d = use_tsched;
1826
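    /* Open the PCM device: prefer an explicit mapping, then automatic probing
     * by device_id, and finally a plain ALSA device string */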
1827     if (mapping) {
1828
1829         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1830             pa_log("device_id= not set");
1831             goto fail;
1832         }
1833
1834         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1835                       dev_id,
1836                       &u->device_name,
1837                       &ss, &map,
1838                       SND_PCM_STREAM_CAPTURE,
1839                       &period_frames, &buffer_frames, tsched_frames,
1840                       &b, &d, mapping)))
1841             goto fail;
1842
1843     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1844
1845         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1846             goto fail;
1847
1848         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1849                       dev_id,
1850                       &u->device_name,
1851                       &ss, &map,
1852                       SND_PCM_STREAM_CAPTURE,
1853                       &period_frames, &buffer_frames, tsched_frames,
1854                       &b, &d, profile_set, &mapping)))
1855             goto fail;
1856
1857     } else {
1858
1859         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1860                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1861                       &u->device_name,
1862                       &ss, &map,
1863                       SND_PCM_STREAM_CAPTURE,
1864                       &period_frames, &buffer_frames, tsched_frames,
1865                       &b, &d, FALSE)))
1866             goto fail;
1867     }
1868
1869     pa_assert(u->device_name);
1870     pa_log_info("Successfully opened device %s.", u->device_name);
1871
1872     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1873         pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
1874         goto fail;
1875     }
1876
1877     if (mapping)
1878         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1879
1880     if (use_mmap && !b) {
1881         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1882         u->use_mmap = use_mmap = FALSE;
1883     }
1884
1885     if (use_tsched && (!b || !d)) {
1886         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1887         u->use_tsched = use_tsched = FALSE;
1888     }
1889
1890     if (u->use_mmap)
1891         pa_log_info("Successfully enabled mmap() mode.");
1892
1893     if (u->use_tsched) {
1894         pa_log_info("Successfully enabled timer-based scheduling mode.");
1895         if (u->fixed_latency_range)
1896             pa_log_info("Disabling latency range changes on overrun");
1897     }
1898
1899     u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
1900     if (!u->rates) {
1901         pa_log_error("Failed to find any supported sample rates.");
1902         goto fail;
1903     }
1904
1905     /* ALSA might tweak the sample spec, so recalculate the frame size */
1906     frame_size = pa_frame_size(&ss);
1907
1908     find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1909
1910     pa_source_new_data_init(&data);
1911     data.driver = driver;
1912     data.module = m;
1913     data.card = card;
1914     set_source_name(&data, ma, dev_id, u->device_name, mapping);
1915
1916     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1917      * variable instead of using &data.namereg_fail directly, because
1918      * data.namereg_fail is a bitfield and taking the address of a bitfield
1919      * variable is impossible. */
1920     namereg_fail = data.namereg_fail;
1921     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1922         pa_log("Failed to parse namereg_fail argument.");
1923         pa_source_new_data_done(&data);
1924         goto fail;
1925     }
1926     data.namereg_fail = namereg_fail;
1927
1928     pa_source_new_data_set_sample_spec(&data, &ss);
1929     pa_source_new_data_set_channel_map(&data, &map);
1930     pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
1931
1932     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1933     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1934     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1935     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1936     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1937
1938     if (mapping) {
1939         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1940         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1941     }
1942
1943     pa_alsa_init_description(data.proplist);
1944
1945     if (u->control_device)
1946         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1947
1948     if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1949         pa_log("Invalid properties");
1950         pa_source_new_data_done(&data);
1951         goto fail;
1952     }
1953
1954     if (u->mixer_path_set)
1955         pa_alsa_add_ports(&data, u->mixer_path_set, card);
1956
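    /* Dynamic latency can only be offered when timer-based scheduling is in use */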
1957     u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1958     pa_source_new_data_done(&data);
1959
1960     if (!u->source) {
1961         pa_log("Failed to create source object");
1962         goto fail;
1963     }
1964
1965     if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
1966                                  &u->source->thread_info.volume_change_safety_margin) < 0) {
1967         pa_log("Failed to parse deferred_volume_safety_margin parameter");
1968         goto fail;
1969     }
1970
1971     if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
1972                                  &u->source->thread_info.volume_change_extra_delay) < 0) {
1973         pa_log("Failed to parse deferred_volume_extra_delay parameter");
1974         goto fail;
1975     }
1976
1977     u->source->parent.process_msg = source_process_msg;
1978     if (u->use_tsched)
1979         u->source->update_requested_latency = source_update_requested_latency_cb;
1980     u->source->set_state = source_set_state_cb;
1981     u->source->set_port = source_set_port_cb;
1982     if (u->source->alternate_sample_rate)
1983         u->source->update_rate = source_update_rate_cb;
1984     u->source->userdata = u;
1985
1986     pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1987     pa_source_set_rtpoll(u->source, u->rtpoll);
1988
1989     u->frame_size = frame_size;
1990     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1991     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1992     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1993
1994     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1995                 (double) u->hwbuf_size / (double) u->fragment_size,
1996                 (long unsigned) u->fragment_size,
1997                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1998                 (long unsigned) u->hwbuf_size,
1999                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2000
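    /* With timer-based scheduling the latency follows the watermark; otherwise
     * it is fixed to the full hardware buffer */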
2001     if (u->use_tsched) {
2002         u->tsched_watermark_ref = tsched_watermark;
2003         reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2004     }
2005     else
2006         pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
2007
2008     reserve_update(u);
2009
2010     if (update_sw_params(u) < 0)
2011         goto fail;
2012
2013     if (setup_mixer(u, ignore_dB) < 0)
2014         goto fail;
2015
2016     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2017
2018     if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
2019         pa_log("Failed to create thread.");
2020         goto fail;
2021     }
2022
2023     /* Get initial mixer settings */
2024     if (data.volume_is_set) {
2025         if (u->source->set_volume)
2026             u->source->set_volume(u->source);
2027     } else {
2028         if (u->source->get_volume)
2029             u->source->get_volume(u->source);
2030     }
2031
2032     if (data.muted_is_set) {
2033         if (u->source->set_mute)
2034             u->source->set_mute(u->source);
2035     } else {
2036         if (u->source->get_mute)
2037             u->source->get_mute(u->source);
2038     }
2039
2040     if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
2041         u->source->write_volume(u->source);
2042
2043     pa_source_put(u->source);
2044
2045     if (profile_set)
2046         pa_alsa_profile_set_free(profile_set);
2047
2048     return u->source;
2049
2050 fail:
2051
2052     if (u)
2053         userdata_free(u);
2054
2055     if (profile_set)
2056         pa_alsa_profile_set_free(profile_set);
2057
2058     return NULL;
2059 }
2060
2061 static void userdata_free(struct userdata *u) {
2062     pa_assert(u);
2063
2064     if (u->source)
2065         pa_source_unlink(u->source);
2066
2067     if (u->thread) {
2068         pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2069         pa_thread_free(u->thread);
2070     }
2071
2072     pa_thread_mq_done(&u->thread_mq);
2073
2074     if (u->source)
2075         pa_source_unref(u->source);
2076
2077     if (u->mixer_pd)
2078         pa_alsa_mixer_pdata_free(u->mixer_pd);
2079
2080     if (u->alsa_rtpoll_item)
2081         pa_rtpoll_item_free(u->alsa_rtpoll_item);
2082
2083     if (u->rtpoll)
2084         pa_rtpoll_free(u->rtpoll);
2085
2086     if (u->pcm_handle) {
2087         snd_pcm_drop(u->pcm_handle);
2088         snd_pcm_close(u->pcm_handle);
2089     }
2090
2091     if (u->mixer_fdl)
2092         pa_alsa_fdlist_free(u->mixer_fdl);
2093
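    /* Paths that come from a path set are owned by the mapping; only free a
     * path we synthesized ourselves */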
2094     if (u->mixer_path && !u->mixer_path_set)
2095         pa_alsa_path_free(u->mixer_path);
2096
2097     if (u->mixer_handle)
2098         snd_mixer_close(u->mixer_handle);
2099
2100     if (u->smoother)
2101         pa_smoother_free(u->smoother);
2102
2103     if (u->rates)
2104         pa_xfree(u->rates);
2105
2106     reserve_done(u);
2107     monitor_done(u);
2108
2109     pa_xfree(u->device_name);
2110     pa_xfree(u->control_device);
2111     pa_xfree(u->paths_dir);
2112     pa_xfree(u);
2113 }
2114
2115 void pa_alsa_source_free(pa_source *s) {
2116     struct userdata *u;
2117
2118     pa_source_assert_ref(s);
2119     pa_assert_se(u = s->userdata);
2120
2121     userdata_free(u);
2122 }