alsa: Merge pa_alsa_setting_select with pa_alsa_path_select
[platform/upstream/pulseaudio.git] / src / modules / alsa / alsa-source.c
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, write to the Free Software
19   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20   USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/volume.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/thread-mq.h>
50 #include <pulsecore/rtpoll.h>
51 #include <pulsecore/time-smoother.h>
52
53 #include <modules/reserve-wrap.h>
54
55 #include "alsa-util.h"
56 #include "alsa-source.h"
57
58 /* #define DEBUG_TIMING */
59
60 #define DEFAULT_DEVICE "default"
61
62 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
63 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
64
65 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
66 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
67 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
68 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
69 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
70 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
71
72 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
73 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
74
75 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
76 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
77
78 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
79 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
80
81 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
82
/* Per-module state for one ALSA capture device. Shared between the main
 * thread and the IO thread; the size_t members below are all byte counts
 * relative to the source's sample spec. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_source *source;

    /* IO thread and the message queue connecting it to the main thread */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    /* ALSA PCM capture handle; NULL while the device is suspended */
    snd_pcm_t *pcm_handle;

    /* Mixer/volume plumbing */
    char *paths_dir;                  /* optional directory with custom mixer path files */
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;       /* last volume exchanged with the hardware mixer */

    /* NOTE(review): presumably the list of alternative sample rates — confirm
     * against module argument parsing, which is outside this chunk. */
    unsigned int *rates;

    /* All byte counts, in terms of the source sample spec: */
    size_t
        frame_size,                   /* bytes per frame */
        fragment_size,                /* ALSA period size */
        hwbuf_size,                   /* total hardware buffer size */
        tsched_watermark,             /* current wakeup watermark (timer scheduling) */
        tsched_watermark_ref,         /* watermark reference for latency rescaling */
        hwbuf_unused,                 /* part of the hw buffer we leave unused to honor latency requests */
        min_sleep,                    /* lower bound on sleep length */
        min_wakeup,                   /* lower bound on wakeup margin */
        watermark_inc_step,           /* how much to raise the watermark per adjustment */
        watermark_dec_step,           /* how much to lower the watermark per adjustment */
        watermark_inc_threshold,      /* raise watermark when less than this is left to record */
        watermark_dec_threshold;      /* lower watermark when more than this is left to record */

    pa_usec_t watermark_dec_not_before; /* earliest time a watermark decrease is allowed; 0 = timer not armed */
    pa_usec_t min_latency_ref;          /* min latency remembered for IO-context range updates */

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    pa_bool_t first;    /* TRUE until the first successful read after (re)start/recovery */

    pa_rtpoll_item *alsa_rtpoll_item; /* poll item wrapping the PCM's fds */

    /* Time smoother translating wallclock time to stream position */
    pa_smoother *smoother;
    uint64_t read_count;              /* total bytes read from the device so far */
    pa_usec_t smoother_interval;      /* current minimum interval between smoother updates */
    pa_usec_t last_smoother_update;   /* timestamp of the previous smoother update */

    /* Device reservation (rtkit/reserve-device protocol) */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
141
142 static void userdata_free(struct userdata *u);
143
144 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
145     pa_assert(r);
146     pa_assert(u);
147
148     if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
149         return PA_HOOK_CANCEL;
150
151     return PA_HOOK_OK;
152 }
153
154 static void reserve_done(struct userdata *u) {
155     pa_assert(u);
156
157     if (u->reserve_slot) {
158         pa_hook_slot_free(u->reserve_slot);
159         u->reserve_slot = NULL;
160     }
161
162     if (u->reserve) {
163         pa_reserve_wrapper_unref(u->reserve);
164         u->reserve = NULL;
165     }
166 }
167
168 static void reserve_update(struct userdata *u) {
169     const char *description;
170     pa_assert(u);
171
172     if (!u->source || !u->reserve)
173         return;
174
175     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
176         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
177 }
178
179 static int reserve_init(struct userdata *u, const char *dname) {
180     char *rname;
181
182     pa_assert(u);
183     pa_assert(dname);
184
185     if (u->reserve)
186         return 0;
187
188     if (pa_in_system_mode())
189         return 0;
190
191     if (!(rname = pa_alsa_get_reserve_name(dname)))
192         return 0;
193
194     /* We are resuming, try to lock the device */
195     u->reserve = pa_reserve_wrapper_get(u->core, rname);
196     pa_xfree(rname);
197
198     if (!(u->reserve))
199         return -1;
200
201     reserve_update(u);
202
203     pa_assert(!u->reserve_slot);
204     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
205
206     return 0;
207 }
208
209 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
210     pa_bool_t b;
211
212     pa_assert(w);
213     pa_assert(u);
214
215     b = PA_PTR_TO_UINT(busy) && !u->reserve;
216
217     pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
218     return PA_HOOK_OK;
219 }
220
221 static void monitor_done(struct userdata *u) {
222     pa_assert(u);
223
224     if (u->monitor_slot) {
225         pa_hook_slot_free(u->monitor_slot);
226         u->monitor_slot = NULL;
227     }
228
229     if (u->monitor) {
230         pa_reserve_monitor_wrapper_unref(u->monitor);
231         u->monitor = NULL;
232     }
233 }
234
235 static int reserve_monitor_init(struct userdata *u, const char *dname) {
236     char *rname;
237
238     pa_assert(u);
239     pa_assert(dname);
240
241     if (pa_in_system_mode())
242         return 0;
243
244     if (!(rname = pa_alsa_get_reserve_name(dname)))
245         return 0;
246
247     /* We are resuming, try to lock the device */
248     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
249     pa_xfree(rname);
250
251     if (!(u->monitor))
252         return -1;
253
254     pa_assert(!u->monitor_slot);
255     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
256
257     return 0;
258 }
259
260 static void fix_min_sleep_wakeup(struct userdata *u) {
261     size_t max_use, max_use_2;
262
263     pa_assert(u);
264     pa_assert(u->use_tsched);
265
266     max_use = u->hwbuf_size - u->hwbuf_unused;
267     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
268
269     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
270     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
271
272     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
273     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
274 }
275
276 static void fix_tsched_watermark(struct userdata *u) {
277     size_t max_use;
278     pa_assert(u);
279     pa_assert(u->use_tsched);
280
281     max_use = u->hwbuf_size - u->hwbuf_unused;
282
283     if (u->tsched_watermark > max_use - u->min_sleep)
284         u->tsched_watermark = max_use - u->min_sleep;
285
286     if (u->tsched_watermark < u->min_wakeup)
287         u->tsched_watermark = u->min_wakeup;
288 }
289
290 static void increase_watermark(struct userdata *u) {
291     size_t old_watermark;
292     pa_usec_t old_min_latency, new_min_latency;
293
294     pa_assert(u);
295     pa_assert(u->use_tsched);
296
297     /* First, just try to increase the watermark */
298     old_watermark = u->tsched_watermark;
299     u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
300     fix_tsched_watermark(u);
301
302     if (old_watermark != u->tsched_watermark) {
303         pa_log_info("Increasing wakeup watermark to %0.2f ms",
304                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
305         return;
306     }
307
308     /* Hmm, we cannot increase the watermark any further, hence let's
309      raise the latency unless doing so was disabled in
310      configuration */
311     if (u->fixed_latency_range)
312         return;
313
314     old_min_latency = u->source->thread_info.min_latency;
315     new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
316     new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
317
318     if (old_min_latency != new_min_latency) {
319         pa_log_info("Increasing minimal latency to %0.2f ms",
320                     (double) new_min_latency / PA_USEC_PER_MSEC);
321
322         pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
323     }
324
325     /* When we reach this we're officialy fucked! */
326 }
327
328 static void decrease_watermark(struct userdata *u) {
329     size_t old_watermark;
330     pa_usec_t now;
331
332     pa_assert(u);
333     pa_assert(u->use_tsched);
334
335     now = pa_rtclock_now();
336
337     if (u->watermark_dec_not_before <= 0)
338         goto restart;
339
340     if (u->watermark_dec_not_before > now)
341         return;
342
343     old_watermark = u->tsched_watermark;
344
345     if (u->tsched_watermark < u->watermark_dec_step)
346         u->tsched_watermark = u->tsched_watermark / 2;
347     else
348         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
349
350     fix_tsched_watermark(u);
351
352     if (old_watermark != u->tsched_watermark)
353         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
354                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
355
356     /* We don't change the latency range*/
357
358 restart:
359     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
360 }
361
362 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
363     pa_usec_t wm, usec;
364
365     pa_assert(sleep_usec);
366     pa_assert(process_usec);
367
368     pa_assert(u);
369     pa_assert(u->use_tsched);
370
371     usec = pa_source_get_requested_latency_within_thread(u->source);
372
373     if (usec == (pa_usec_t) -1)
374         usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
375
376     wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
377
378     if (wm > usec)
379         wm = usec/2;
380
381     *sleep_usec = usec - wm;
382     *process_usec = wm;
383
384 #ifdef DEBUG_TIMING
385     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
386                  (unsigned long) (usec / PA_USEC_PER_MSEC),
387                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
388                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
389 #endif
390 }
391
392 static int try_recover(struct userdata *u, const char *call, int err) {
393     pa_assert(u);
394     pa_assert(call);
395     pa_assert(err < 0);
396
397     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
398
399     pa_assert(err != -EAGAIN);
400
401     if (err == -EPIPE)
402         pa_log_debug("%s: Buffer overrun!", call);
403
404     if (err == -ESTRPIPE)
405         pa_log_debug("%s: System suspended!", call);
406
407     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
408         pa_log("%s: %s", call, pa_alsa_strerror(err));
409         return -1;
410     }
411
412     u->first = TRUE;
413     return 0;
414 }
415
/* Given n_bytes currently available for reading, compute how much room is
 * left in the hardware buffer before it would overrun, detect actual
 * overruns, and drive the adaptive watermark: raise it on (near-)overrun,
 * lower it (rate-limited, and only on real timeouts) when we have plenty of
 * headroom. Returns the number of bytes left to record. */
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    pa_bool_t overrun = FALSE;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        /* Too close to overrun (or already overrun): widen the safety margin */
        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = FALSE;

            /* We decrease the watermark only if have actually
             * been woken up by a timeout. If something else woke
             * us up it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        /* In the middle band, disarm the decrease rate-limit timer so a
         * later decrease has to re-earn its grace period */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}
468
/* Read all currently available capture data from the PCM using zero-copy
 * mmap transfer and post it to the source. Returns 1 if any data was read,
 * 0 if not, or a negative value on unrecoverable error. With timer
 * scheduling, *sleep_usec is set to how long the caller may sleep before the
 * next wakeup. 'polled' means poll() reported POLLIN; 'on_timeout' means we
 * were woken by the timer (only then may the watermark be decreased). */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    /* Outer loop: re-query avail until we've drained enough or decide to stop */
    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE; /* only the first iteration counts as a timeout wakeup */

        /* With timer scheduling, skip reading if plenty of buffer room
         * remains and we weren't woken by poll() */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing to read indicates a driver bug; warn once */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }


        /* Bound the number of outer iterations so we cannot starve the
         * rest of the IO loop */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        /* Inner loop: consume the n_bytes reported by avail in mmap chunks */
        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
/*             pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                /* EAGAIN after we already read something just means "done" */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'ed hardware area in a fixed (non-owned) memblock
             * and hand it to the source without copying */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the remaining room shrinks to the watermark */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
630
/* Non-mmap counterpart of mmap_read(): read available capture data with
 * snd_pcm_readi() into freshly allocated memblocks and post it to the
 * source. Same return convention and parameters as mmap_read(). */
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    int work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    /* Outer loop: re-query avail until drained or we decide to stop */
    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE; /* only the first iteration counts as a timeout wakeup */

        /* With timer scheduling, skip reading if there's still plenty of
         * room and poll() didn't wake us */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing to read indicates a driver bug; warn once */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Bound the number of outer iterations so we cannot starve the
         * rest of the IO loop */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

        /* Inner loop: consume n_bytes in memblock-sized reads */
        for (;;) {
            void *p;
            snd_pcm_sframes_t frames;
            pa_memchunk chunk;

            /* (size_t) -1 requests the pool's default block size */
            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

/*             pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                /* EAGAIN after we already read something just means "done" */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);
                break;
            }

            pa_assert(frames > 0);
            after_avail = FALSE;

            chunk.index = 0;
            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

/*             pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the remaining room shrinks to the watermark */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
760
/* Feed the time smoother with a fresh (wallclock time, stream position)
 * sample pair, rate-limited by an exponentially growing update interval. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver's own timestamp for the wallclock side */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Stream position = bytes posted so far plus what still sits in the
     * hardware buffer (capture delay) */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
806
807 static pa_usec_t source_get_latency(struct userdata *u) {
808     int64_t delay;
809     pa_usec_t now1, now2;
810
811     pa_assert(u);
812
813     now1 = pa_rtclock_now();
814     now2 = pa_smoother_get(u->smoother, now1);
815
816     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
817
818     return delay >= 0 ? (pa_usec_t) delay : 0;
819 }
820
821 static int build_pollfd(struct userdata *u) {
822     pa_assert(u);
823     pa_assert(u->pcm_handle);
824
825     if (u->alsa_rtpoll_item)
826         pa_rtpoll_item_free(u->alsa_rtpoll_item);
827
828     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
829         return -1;
830
831     return 0;
832 }
833
834 /* Called from IO context */
835 static int suspend(struct userdata *u) {
836     pa_assert(u);
837     pa_assert(u->pcm_handle);
838
839     pa_smoother_pause(u->smoother, pa_rtclock_now());
840
841     /* Let's suspend */
842     snd_pcm_close(u->pcm_handle);
843     u->pcm_handle = NULL;
844
845     if (u->alsa_rtpoll_item) {
846         pa_rtpoll_item_free(u->alsa_rtpoll_item);
847         u->alsa_rtpoll_item = NULL;
848     }
849
850     pa_log_info("Device suspended...");
851
852     return 0;
853 }
854
/* Called from IO context: recompute hwbuf_unused and avail_min from the
 * currently requested latency and push the resulting software parameters to
 * ALSA. Returns 0 on success or a negative ALSA error code. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency is left unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* The usable buffer size changed, so rederive the dependent limits */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* With timer scheduling, only let ALSA wake us once a full sleep
         * period worth of data has accumulated */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}
907
/* Called from IO Context on unsuspend or from main thread when creating source.
 * Re-derive the watermark and its adjustment steps/thresholds (as byte counts
 * in the source's sample spec) from the given watermark expressed in the
 * sample spec 'ss', then re-announce the latency range. */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            pa_bool_t in_thread)
{
    /* Convert the watermark from 'ss' bytes to source-sample-spec bytes via usec */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->source->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_source_set_latency_range_within_thread(u->source,
                                                  u->min_latency_ref,
                                                  pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_source_set_latency_within_thead,
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->source->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
}
942
/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Re-open the PCM device; the NO_AUTO_* flags disable alsa-lib's
     * conversion plugins so we get exactly the configuration we ask for. */
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Request the same sample spec, fragment/buffer sizes and access mode
     * that were in use before the suspend. */
    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* If the device can't reproduce the old configuration we must give up:
     * the source's parameters cannot change across a suspend/resume. */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Reset the timing/latency estimation state from scratch. */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    /* Makes the IO thread call snd_pcm_start() on the next iteration. */
    u->first = TRUE;

    /* reset the watermark to the value defined when source was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1024
/* Called from IO context */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no PCM handle; report zero latency. */
            if (u->pcm_handle)
                r = source_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_STATE:

            /* Open/close the ALSA device as the source state changes. */
            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SOURCE_SUSPENDED: {
                    int r;

                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING: {
                    int r;

                    /* First transition out of INIT: set up the poll fds. */
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    /* Waking up from suspend: reopen the device. */
                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    ;
            }

            break;
    }

    /* Everything else is handled by the generic source implementation. */
    return pa_source_process_msg(o, code, data, offset, chunk);
}
1085
1086 /* Called from main context */
1087 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1088     pa_source_state_t old_state;
1089     struct userdata *u;
1090
1091     pa_source_assert_ref(s);
1092     pa_assert_se(u = s->userdata);
1093
1094     old_state = pa_source_get_state(u->source);
1095
1096     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1097         reserve_done(u);
1098     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1099         if (reserve_init(u, u->device_name) < 0)
1100             return -PA_ERR_BUSY;
1101
1102     return 0;
1103 }
1104
1105 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1106     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1107
1108     pa_assert(u);
1109     pa_assert(u->mixer_handle);
1110
1111     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1112         return 0;
1113
1114     if (!PA_SOURCE_IS_LINKED(u->source->state))
1115         return 0;
1116
1117     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1118         pa_source_set_mixer_dirty(u->source, TRUE);
1119         return 0;
1120     }
1121
1122     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1123         pa_source_get_volume(u->source, TRUE);
1124         pa_source_get_mute(u->source, TRUE);
1125     }
1126
1127     return 0;
1128 }
1129
1130 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1131     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1132
1133     pa_assert(u);
1134     pa_assert(u->mixer_handle);
1135
1136     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1137         return 0;
1138
1139     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1140         pa_source_set_mixer_dirty(u->source, TRUE);
1141         return 0;
1142     }
1143
1144     if (mask & SND_CTL_EVENT_MASK_VALUE)
1145         pa_source_update_volume_and_mute(u->source);
1146
1147     return 0;
1148 }
1149
1150 static void source_get_volume_cb(pa_source *s) {
1151     struct userdata *u = s->userdata;
1152     pa_cvolume r;
1153     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1154
1155     pa_assert(u);
1156     pa_assert(u->mixer_path);
1157     pa_assert(u->mixer_handle);
1158
1159     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1160         return;
1161
1162     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1163     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1164
1165     pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1166
1167     if (u->mixer_path->has_dB) {
1168         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1169
1170         pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1171     }
1172
1173     if (pa_cvolume_equal(&u->hardware_volume, &r))
1174         return;
1175
1176     s->real_volume = u->hardware_volume = r;
1177
1178     /* Hmm, so the hardware volume changed, let's reset our software volume */
1179     if (u->mixer_path->has_dB)
1180         pa_source_set_soft_volume(s, NULL);
1181 }
1182
/* Write s->real_volume to the hardware mixer, then compute the residual
 * software volume needed to match the user's request exactly. */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* With deferred volume enabled the actual write happens later from the
     * IO thread (source_write_volume_cb); here only the rounding is done. */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    /* r now holds what the hardware actually accepted. */
    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1240
/* Deferred-volume write callback: flushes the pending hardware volume
 * (s->thread_info.current_hw_volume) into the ALSA mixer. Only used when
 * PA_SOURCE_DEFERRED_VOLUME is set (asserted below). */
static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Compare what the hardware accepted with what was requested;
         * deviations within VOLUME_ACCURACY are tolerated silently. */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug("                                           in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}
1282
1283 static void source_get_mute_cb(pa_source *s) {
1284     struct userdata *u = s->userdata;
1285     pa_bool_t b;
1286
1287     pa_assert(u);
1288     pa_assert(u->mixer_path);
1289     pa_assert(u->mixer_handle);
1290
1291     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1292         return;
1293
1294     s->muted = b;
1295 }
1296
1297 static void source_set_mute_cb(pa_source *s) {
1298     struct userdata *u = s->userdata;
1299
1300     pa_assert(u);
1301     pa_assert(u->mixer_path);
1302     pa_assert(u->mixer_handle);
1303
1304     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1305 }
1306
1307 static void mixer_volume_init(struct userdata *u) {
1308     pa_assert(u);
1309
1310     if (!u->mixer_path->has_volume) {
1311         pa_source_set_write_volume_callback(u->source, NULL);
1312         pa_source_set_get_volume_callback(u->source, NULL);
1313         pa_source_set_set_volume_callback(u->source, NULL);
1314
1315         pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1316     } else {
1317         pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1318         pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1319
1320         if (u->mixer_path->has_dB && u->deferred_volume) {
1321             pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1322             pa_log_info("Successfully enabled deferred volume.");
1323         } else
1324             pa_source_set_write_volume_callback(u->source, NULL);
1325
1326         if (u->mixer_path->has_dB) {
1327             pa_source_enable_decibel_volume(u->source, TRUE);
1328             pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1329
1330             u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1331             u->source->n_volume_steps = PA_VOLUME_NORM+1;
1332
1333             pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1334         } else {
1335             pa_source_enable_decibel_volume(u->source, FALSE);
1336             pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1337
1338             u->source->base_volume = PA_VOLUME_NORM;
1339             u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1340         }
1341
1342         pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1343     }
1344
1345     if (!u->mixer_path->has_mute) {
1346         pa_source_set_get_mute_callback(u->source, NULL);
1347         pa_source_set_set_mute_callback(u->source, NULL);
1348         pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1349     } else {
1350         pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1351         pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1352         pa_log_info("Using hardware mute control.");
1353     }
1354 }
1355
1356 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1357     struct userdata *u = s->userdata;
1358     pa_alsa_port_data *data;
1359
1360     pa_assert(u);
1361     pa_assert(p);
1362     pa_assert(u->mixer_handle);
1363
1364     data = PA_DEVICE_PORT_DATA(p);
1365
1366     pa_assert_se(u->mixer_path = data->path);
1367     pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1368
1369     mixer_volume_init(u);
1370
1371     if (s->set_mute)
1372         s->set_mute(s);
1373     if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1374         if (s->write_volume)
1375             s->write_volume(s);
1376     } else {
1377         if (s->set_volume)
1378             s->set_volume(s);
1379     }
1380
1381     return 0;
1382 }
1383
1384 static void source_update_requested_latency_cb(pa_source *s) {
1385     struct userdata *u = s->userdata;
1386     pa_assert(u);
1387     pa_assert(u->use_tsched); /* only when timer scheduling is used
1388                                * we can dynamically adjust the
1389                                * latency */
1390
1391     if (!u->pcm_handle)
1392         return;
1393
1394     update_sw_params(u);
1395 }
1396
1397 static pa_bool_t source_update_rate_cb(pa_source *s, uint32_t rate)
1398 {
1399     struct userdata *u = s->userdata;
1400     int i;
1401     pa_bool_t supported = FALSE;
1402
1403     pa_assert(u);
1404
1405     for (i = 0; u->rates[i]; i++) {
1406         if (u->rates[i] == rate) {
1407             supported = TRUE;
1408             break;
1409         }
1410     }
1411
1412     if (!supported) {
1413         pa_log_info("Sink does not support sample rate of %d Hz", rate);
1414         return FALSE;
1415     }
1416
1417     if (!PA_SOURCE_IS_OPENED(s->state)) {
1418         pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1419         u->source->sample_spec.rate = rate;
1420         return TRUE;
1421     }
1422
1423     return FALSE;
1424 }
1425
/* IO thread entry point: runs the capture loop until asked to shut down. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Read some data and pass it to the sources */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* First iteration after open/resume: kick off the capture. */
            if (u->first) {
                pa_log_info("Starting capture.");
                snd_pcm_start(u->pcm_handle);

                pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                u->first = FALSE;
            }

            if (u->use_mmap)
                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
            else
                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);

            if (work_done < 0)
                goto fail;

/*             pa_log_debug("work_done = %i", work_done); */

            if (work_done)
                update_smoother(u);

            if (u->use_tsched) {
                pa_usec_t cusec;

                /* OK, the capture buffer is now empty, let's
                 * calculate when to wake up next */

/*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

/*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }
        }

        /* A pending deferred-volume write may require an earlier wakeup. */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_source_volume_change_apply(u->source, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0)
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
            pa_source_volume_change_apply(u->source, NULL);

        /* pa_rtpoll_run() returning 0 means we were asked to quit. */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything other than POLLIN indicates an error condition:
             * try to recover and restart the capture from scratch. */
            if (revents & ~POLLIN) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1555
1556 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1557     const char *n;
1558     char *t;
1559
1560     pa_assert(data);
1561     pa_assert(ma);
1562     pa_assert(device_name);
1563
1564     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1565         pa_source_new_data_set_name(data, n);
1566         data->namereg_fail = TRUE;
1567         return;
1568     }
1569
1570     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1571         data->namereg_fail = TRUE;
1572     else {
1573         n = device_id ? device_id : device_name;
1574         data->namereg_fail = FALSE;
1575     }
1576
1577     if (mapping)
1578         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1579     else
1580         t = pa_sprintf_malloc("alsa_input.%s", n);
1581
1582     pa_source_new_data_set_name(data, t);
1583     pa_xfree(t);
1584 }
1585
1586 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1587     snd_hctl_t *hctl;
1588
1589     if (!mapping && !element)
1590         return;
1591
1592     if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1593         pa_log_info("Failed to find a working mixer device.");
1594         return;
1595     }
1596
1597     if (element) {
1598
1599         if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1600             goto fail;
1601
1602         if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1603             goto fail;
1604
1605         pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1606         pa_alsa_path_dump(u->mixer_path);
1607     } else if (!(u->mixer_path_set = mapping->input_path_set))
1608         goto fail;
1609
1610     return;
1611
1612 fail:
1613
1614     if (u->mixer_path) {
1615         pa_alsa_path_free(u->mixer_path);
1616         u->mixer_path = NULL;
1617     }
1618
1619     if (u->mixer_handle) {
1620         snd_mixer_close(u->mixer_handle);
1621         u->mixer_handle = NULL;
1622     }
1623 }
1624
/* Activate the appropriate mixer path and register change callbacks.
 * Returns 0 on success (including "no mixer, use software control"),
 * -1 on failure to set up fd monitoring. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    /* NOTE(review): ignore_dB is not referenced in this function body. */

    /* No mixer was found earlier: software volume/mute will be used. */
    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        /* With deferred volume the mixer is polled from the IO thread's
         * rtpoll; otherwise events are dispatched from the main loop. */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1700
1701 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1702
1703     struct userdata *u = NULL;
1704     const char *dev_id = NULL;
1705     pa_sample_spec ss;
1706     uint32_t alternate_sample_rate;
1707     pa_channel_map map;
1708     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1709     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1710     size_t frame_size;
1711     pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, fixed_latency_range = FALSE;
1712     pa_source_new_data data;
1713     pa_alsa_profile_set *profile_set = NULL;
1714
1715     pa_assert(m);
1716     pa_assert(ma);
1717
1718     ss = m->core->default_sample_spec;
1719     map = m->core->default_channel_map;
1720     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1721         pa_log("Failed to parse sample specification and channel map");
1722         goto fail;
1723     }
1724
1725     alternate_sample_rate = m->core->alternate_sample_rate;
1726     if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1727         pa_log("Failed to parse alternate sample rate");
1728         goto fail;
1729     }
1730
1731     frame_size = pa_frame_size(&ss);
1732
1733     nfrags = m->core->default_n_fragments;
1734     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1735     if (frag_size <= 0)
1736         frag_size = (uint32_t) frame_size;
1737     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1738     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1739
1740     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1741         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1742         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1743         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1744         pa_log("Failed to parse buffer metrics");
1745         goto fail;
1746     }
1747
1748     buffer_size = nfrags * frag_size;
1749
1750     period_frames = frag_size/frame_size;
1751     buffer_frames = buffer_size/frame_size;
1752     tsched_frames = tsched_size/frame_size;
1753
1754     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1755         pa_log("Failed to parse mmap argument.");
1756         goto fail;
1757     }
1758
1759     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1760         pa_log("Failed to parse tsched argument.");
1761         goto fail;
1762     }
1763
1764     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1765         pa_log("Failed to parse ignore_dB argument.");
1766         goto fail;
1767     }
1768
1769     deferred_volume = m->core->deferred_volume;
1770     if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1771         pa_log("Failed to parse deferred_volume argument.");
1772         goto fail;
1773     }
1774
1775     if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
1776         pa_log("Failed to parse fixed_latency_range argument.");
1777         goto fail;
1778     }
1779
1780     use_tsched = pa_alsa_may_tsched(use_tsched);
1781
1782     u = pa_xnew0(struct userdata, 1);
1783     u->core = m->core;
1784     u->module = m;
1785     u->use_mmap = use_mmap;
1786     u->use_tsched = use_tsched;
1787     u->deferred_volume = deferred_volume;
1788     u->fixed_latency_range = fixed_latency_range;
1789     u->first = TRUE;
1790     u->rtpoll = pa_rtpoll_new();
1791     pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1792
1793     u->smoother = pa_smoother_new(
1794             SMOOTHER_ADJUST_USEC,
1795             SMOOTHER_WINDOW_USEC,
1796             TRUE,
1797             TRUE,
1798             5,
1799             pa_rtclock_now(),
1800             TRUE);
1801     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1802
1803     dev_id = pa_modargs_get_value(
1804             ma, "device_id",
1805             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1806
1807     u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
1808
1809     if (reserve_init(u, dev_id) < 0)
1810         goto fail;
1811
1812     if (reserve_monitor_init(u, dev_id) < 0)
1813         goto fail;
1814
1815     b = use_mmap;
1816     d = use_tsched;
1817
1818     if (mapping) {
1819
1820         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1821             pa_log("device_id= not set");
1822             goto fail;
1823         }
1824
1825         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1826                       dev_id,
1827                       &u->device_name,
1828                       &ss, &map,
1829                       SND_PCM_STREAM_CAPTURE,
1830                       &period_frames, &buffer_frames, tsched_frames,
1831                       &b, &d, mapping)))
1832             goto fail;
1833
1834     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1835
1836         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1837             goto fail;
1838
1839         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1840                       dev_id,
1841                       &u->device_name,
1842                       &ss, &map,
1843                       SND_PCM_STREAM_CAPTURE,
1844                       &period_frames, &buffer_frames, tsched_frames,
1845                       &b, &d, profile_set, &mapping)))
1846             goto fail;
1847
1848     } else {
1849
1850         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1851                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1852                       &u->device_name,
1853                       &ss, &map,
1854                       SND_PCM_STREAM_CAPTURE,
1855                       &period_frames, &buffer_frames, tsched_frames,
1856                       &b, &d, FALSE)))
1857             goto fail;
1858     }
1859
1860     pa_assert(u->device_name);
1861     pa_log_info("Successfully opened device %s.", u->device_name);
1862
1863     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1864         pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1865         goto fail;
1866     }
1867
1868     if (mapping)
1869         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1870
1871     if (use_mmap && !b) {
1872         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1873         u->use_mmap = use_mmap = FALSE;
1874     }
1875
1876     if (use_tsched && (!b || !d)) {
1877         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1878         u->use_tsched = use_tsched = FALSE;
1879     }
1880
1881     if (u->use_mmap)
1882         pa_log_info("Successfully enabled mmap() mode.");
1883
1884     if (u->use_tsched) {
1885         pa_log_info("Successfully enabled timer-based scheduling mode.");
1886         if (u->fixed_latency_range)
1887             pa_log_info("Disabling latency range changes on overrun");
1888     }
1889
1890     u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
1891     if (!u->rates) {
1892         pa_log_error("Failed to find any supported sample rates.");
1893         goto fail;
1894     }
1895
1896     /* ALSA might tweak the sample spec, so recalculate the frame size */
1897     frame_size = pa_frame_size(&ss);
1898
1899     find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1900
1901     pa_source_new_data_init(&data);
1902     data.driver = driver;
1903     data.module = m;
1904     data.card = card;
1905     set_source_name(&data, ma, dev_id, u->device_name, mapping);
1906
1907     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1908      * variable instead of using &data.namereg_fail directly, because
1909      * data.namereg_fail is a bitfield and taking the address of a bitfield
1910      * variable is impossible. */
1911     namereg_fail = data.namereg_fail;
1912     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1913         pa_log("Failed to parse namereg_fail argument.");
1914         pa_source_new_data_done(&data);
1915         goto fail;
1916     }
1917     data.namereg_fail = namereg_fail;
1918
1919     pa_source_new_data_set_sample_spec(&data, &ss);
1920     pa_source_new_data_set_channel_map(&data, &map);
1921     pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
1922
1923     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1924     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1925     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1926     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1927     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1928
1929     if (mapping) {
1930         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1931         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1932     }
1933
1934     pa_alsa_init_description(data.proplist);
1935
1936     if (u->control_device)
1937         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1938
1939     if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1940         pa_log("Invalid properties");
1941         pa_source_new_data_done(&data);
1942         goto fail;
1943     }
1944
1945     if (u->mixer_path_set)
1946         pa_alsa_add_ports(&data, u->mixer_path_set, card);
1947
1948     u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1949     pa_source_new_data_done(&data);
1950
1951     if (!u->source) {
1952         pa_log("Failed to create source object");
1953         goto fail;
1954     }
1955
1956     if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
1957                                  &u->source->thread_info.volume_change_safety_margin) < 0) {
1958         pa_log("Failed to parse deferred_volume_safety_margin parameter");
1959         goto fail;
1960     }
1961
1962     if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
1963                                  &u->source->thread_info.volume_change_extra_delay) < 0) {
1964         pa_log("Failed to parse deferred_volume_extra_delay parameter");
1965         goto fail;
1966     }
1967
1968     u->source->parent.process_msg = source_process_msg;
1969     if (u->use_tsched)
1970         u->source->update_requested_latency = source_update_requested_latency_cb;
1971     u->source->set_state = source_set_state_cb;
1972     u->source->set_port = source_set_port_cb;
1973     if (u->source->alternate_sample_rate)
1974         u->source->update_rate = source_update_rate_cb;
1975     u->source->userdata = u;
1976
1977     pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1978     pa_source_set_rtpoll(u->source, u->rtpoll);
1979
1980     u->frame_size = frame_size;
1981     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1982     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1983     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1984
1985     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1986                 (double) u->hwbuf_size / (double) u->fragment_size,
1987                 (long unsigned) u->fragment_size,
1988                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1989                 (long unsigned) u->hwbuf_size,
1990                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1991
1992     if (u->use_tsched) {
1993         u->tsched_watermark_ref = tsched_watermark;
1994         reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
1995     }
1996     else
1997         pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1998
1999     reserve_update(u);
2000
2001     if (update_sw_params(u) < 0)
2002         goto fail;
2003
2004     if (setup_mixer(u, ignore_dB) < 0)
2005         goto fail;
2006
2007     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2008
2009     if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
2010         pa_log("Failed to create thread.");
2011         goto fail;
2012     }
2013
2014     /* Get initial mixer settings */
2015     if (data.volume_is_set) {
2016         if (u->source->set_volume)
2017             u->source->set_volume(u->source);
2018     } else {
2019         if (u->source->get_volume)
2020             u->source->get_volume(u->source);
2021     }
2022
2023     if (data.muted_is_set) {
2024         if (u->source->set_mute)
2025             u->source->set_mute(u->source);
2026     } else {
2027         if (u->source->get_mute)
2028             u->source->get_mute(u->source);
2029     }
2030
2031     if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
2032         u->source->write_volume(u->source);
2033
2034     pa_source_put(u->source);
2035
2036     if (profile_set)
2037         pa_alsa_profile_set_free(profile_set);
2038
2039     return u->source;
2040
2041 fail:
2042
2043     if (u)
2044         userdata_free(u);
2045
2046     if (profile_set)
2047         pa_alsa_profile_set_free(profile_set);
2048
2049     return NULL;
2050 }
2051
2052 static void userdata_free(struct userdata *u) {
2053     pa_assert(u);
2054
2055     if (u->source)
2056         pa_source_unlink(u->source);
2057
2058     if (u->thread) {
2059         pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2060         pa_thread_free(u->thread);
2061     }
2062
2063     pa_thread_mq_done(&u->thread_mq);
2064
2065     if (u->source)
2066         pa_source_unref(u->source);
2067
2068     if (u->mixer_pd)
2069         pa_alsa_mixer_pdata_free(u->mixer_pd);
2070
2071     if (u->alsa_rtpoll_item)
2072         pa_rtpoll_item_free(u->alsa_rtpoll_item);
2073
2074     if (u->rtpoll)
2075         pa_rtpoll_free(u->rtpoll);
2076
2077     if (u->pcm_handle) {
2078         snd_pcm_drop(u->pcm_handle);
2079         snd_pcm_close(u->pcm_handle);
2080     }
2081
2082     if (u->mixer_fdl)
2083         pa_alsa_fdlist_free(u->mixer_fdl);
2084
2085     if (u->mixer_path && !u->mixer_path_set)
2086         pa_alsa_path_free(u->mixer_path);
2087
2088     if (u->mixer_handle)
2089         snd_mixer_close(u->mixer_handle);
2090
2091     if (u->smoother)
2092         pa_smoother_free(u->smoother);
2093
2094     if (u->rates)
2095         pa_xfree(u->rates);
2096
2097     reserve_done(u);
2098     monitor_done(u);
2099
2100     pa_xfree(u->device_name);
2101     pa_xfree(u->control_device);
2102     pa_xfree(u->paths_dir);
2103     pa_xfree(u);
2104 }
2105
2106 void pa_alsa_source_free(pa_source *s) {
2107     struct userdata *u;
2108
2109     pa_source_assert_ref(s);
2110     pa_assert_se(u = s->userdata);
2111
2112     userdata_free(u);
2113 }