/* cli: Allow source-output volumes/mute to be set via CLI
 * [profile/ivi/pulseaudio-panda.git] / src / modules / alsa / alsa-source.c */
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, write to the Free Software
19   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20   USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/rtclock.h>
32 #include <pulse/timeval.h>
33 #include <pulse/volume.h>
34 #include <pulse/xmalloc.h>
35
36 #include <pulsecore/core.h>
37 #include <pulsecore/i18n.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-rtclock.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/thread.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/time-smoother.h>
51
52 #include <modules/reserve-wrap.h>
53
54 #include "alsa-util.h"
55 #include "alsa-source.h"
56
57 /* #define DEBUG_TIMING */
58
59 #define DEFAULT_DEVICE "default"
60
61 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
62 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
63
64 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
65 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
66 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
67 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
68 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
69 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
70
71 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
72 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
73
74 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
75 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
76
77 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
78 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
79
80 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
81
/* Per-instance state of one ALSA capture source. Shared between the
 * main thread and the IO thread (see thread_mq/rtpoll below). */
struct userdata {
    pa_core *core;        /* owning core */
    pa_module *module;    /* module instance that created us */
    pa_source *source;    /* the source object we implement */

    pa_thread *thread;        /* dedicated IO thread */
    pa_thread_mq thread_mq;   /* message queue between main and IO thread */
    pa_rtpoll *rtpoll;        /* poll loop driven by the IO thread */

    snd_pcm_t *pcm_handle;    /* ALSA capture PCM; NULL while suspended (see suspend()/unsuspend()) */

    /* Mixer / hardware volume plumbing */
    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;   /* volume as known to the hardware mixer */

    unsigned int *rates;          /* NOTE(review): allocation/ownership not visible in this chunk */

    /* All of the following sizes are in bytes. */
    size_t
        frame_size,               /* bytes per audio frame */
        fragment_size,            /* ALSA period size */
        hwbuf_size,               /* total hardware buffer size */
        tsched_watermark,         /* current timer-scheduling wakeup watermark */
        tsched_watermark_ref,
        hwbuf_unused,             /* slice of the hw buffer deliberately left unused to honor latency requests */
        min_sleep,                /* lower bound on sleep time (see fix_min_sleep_wakeup()) */
        min_wakeup,               /* lower bound on the wakeup margin */
        watermark_inc_step,       /* step by which increase_watermark() raises the watermark */
        watermark_dec_step,       /* step by which decrease_watermark() lowers it */
        watermark_inc_threshold,  /* raise watermark when less than this is left to record */
        watermark_dec_threshold;  /* consider lowering when more than this is left */

    pa_usec_t watermark_dec_not_before;   /* earliest time the watermark may be lowered; 0 = timer not armed */
    pa_usec_t min_latency_ref;            /* min latency remembered for IO-context reuse (see reset_watermark()) */

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    pa_bool_t first;    /* TRUE until the stream is up; re-set by try_recover() after an error */

    pa_rtpoll_item *alsa_rtpoll_item;     /* the PCM's pollfds hooked into our rtpoll */

    /* Time smoother state used for latency interpolation */
    pa_smoother *smoother;
    uint64_t read_count;                  /* total bytes read from the device so far */
    pa_usec_t smoother_interval;          /* current update interval; doubles up to SMOOTHER_MAX_INTERVAL */
    pa_usec_t last_smoother_update;       /* timestamp of the most recent smoother update */

    /* Device reservation (cooperative device locking) */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
140
141 static void userdata_free(struct userdata *u);
142
143 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
144     pa_assert(r);
145     pa_assert(u);
146
147     if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
148         return PA_HOOK_CANCEL;
149
150     return PA_HOOK_OK;
151 }
152
153 static void reserve_done(struct userdata *u) {
154     pa_assert(u);
155
156     if (u->reserve_slot) {
157         pa_hook_slot_free(u->reserve_slot);
158         u->reserve_slot = NULL;
159     }
160
161     if (u->reserve) {
162         pa_reserve_wrapper_unref(u->reserve);
163         u->reserve = NULL;
164     }
165 }
166
167 static void reserve_update(struct userdata *u) {
168     const char *description;
169     pa_assert(u);
170
171     if (!u->source || !u->reserve)
172         return;
173
174     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
175         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
176 }
177
178 static int reserve_init(struct userdata *u, const char *dname) {
179     char *rname;
180
181     pa_assert(u);
182     pa_assert(dname);
183
184     if (u->reserve)
185         return 0;
186
187     if (pa_in_system_mode())
188         return 0;
189
190     if (!(rname = pa_alsa_get_reserve_name(dname)))
191         return 0;
192
193     /* We are resuming, try to lock the device */
194     u->reserve = pa_reserve_wrapper_get(u->core, rname);
195     pa_xfree(rname);
196
197     if (!(u->reserve))
198         return -1;
199
200     reserve_update(u);
201
202     pa_assert(!u->reserve_slot);
203     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
204
205     return 0;
206 }
207
208 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
209     pa_bool_t b;
210
211     pa_assert(w);
212     pa_assert(u);
213
214     b = PA_PTR_TO_UINT(busy) && !u->reserve;
215
216     pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
217     return PA_HOOK_OK;
218 }
219
220 static void monitor_done(struct userdata *u) {
221     pa_assert(u);
222
223     if (u->monitor_slot) {
224         pa_hook_slot_free(u->monitor_slot);
225         u->monitor_slot = NULL;
226     }
227
228     if (u->monitor) {
229         pa_reserve_monitor_wrapper_unref(u->monitor);
230         u->monitor = NULL;
231     }
232 }
233
234 static int reserve_monitor_init(struct userdata *u, const char *dname) {
235     char *rname;
236
237     pa_assert(u);
238     pa_assert(dname);
239
240     if (pa_in_system_mode())
241         return 0;
242
243     if (!(rname = pa_alsa_get_reserve_name(dname)))
244         return 0;
245
246     /* We are resuming, try to lock the device */
247     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
248     pa_xfree(rname);
249
250     if (!(u->monitor))
251         return -1;
252
253     pa_assert(!u->monitor_slot);
254     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
255
256     return 0;
257 }
258
259 static void fix_min_sleep_wakeup(struct userdata *u) {
260     size_t max_use, max_use_2;
261
262     pa_assert(u);
263     pa_assert(u->use_tsched);
264
265     max_use = u->hwbuf_size - u->hwbuf_unused;
266     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
267
268     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
269     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
270
271     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
272     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
273 }
274
275 static void fix_tsched_watermark(struct userdata *u) {
276     size_t max_use;
277     pa_assert(u);
278     pa_assert(u->use_tsched);
279
280     max_use = u->hwbuf_size - u->hwbuf_unused;
281
282     if (u->tsched_watermark > max_use - u->min_sleep)
283         u->tsched_watermark = max_use - u->min_sleep;
284
285     if (u->tsched_watermark < u->min_wakeup)
286         u->tsched_watermark = u->min_wakeup;
287 }
288
/* React to an (imminent) overrun: raise the wakeup watermark, and if
 * that is already maxed out, raise the source's minimal latency instead
 * (unless the latency range was fixed via configuration). */
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
     raise the latency unless doing so was disabled in
     configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->source->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
    }

    /* If we reach this point, neither the watermark nor the latency
     * could be raised any further; nothing more we can do about
     * dropouts. */
}
326
327 static void decrease_watermark(struct userdata *u) {
328     size_t old_watermark;
329     pa_usec_t now;
330
331     pa_assert(u);
332     pa_assert(u->use_tsched);
333
334     now = pa_rtclock_now();
335
336     if (u->watermark_dec_not_before <= 0)
337         goto restart;
338
339     if (u->watermark_dec_not_before > now)
340         return;
341
342     old_watermark = u->tsched_watermark;
343
344     if (u->tsched_watermark < u->watermark_dec_step)
345         u->tsched_watermark = u->tsched_watermark / 2;
346     else
347         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
348
349     fix_tsched_watermark(u);
350
351     if (old_watermark != u->tsched_watermark)
352         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
353                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
354
355     /* We don't change the latency range*/
356
357 restart:
358     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
359 }
360
361 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
362     pa_usec_t wm, usec;
363
364     pa_assert(sleep_usec);
365     pa_assert(process_usec);
366
367     pa_assert(u);
368     pa_assert(u->use_tsched);
369
370     usec = pa_source_get_requested_latency_within_thread(u->source);
371
372     if (usec == (pa_usec_t) -1)
373         usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
374
375     wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
376
377     if (wm > usec)
378         wm = usec/2;
379
380     *sleep_usec = usec - wm;
381     *process_usec = wm;
382
383 #ifdef DEBUG_TIMING
384     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
385                  (unsigned long) (usec / PA_USEC_PER_MSEC),
386                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
387                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
388 #endif
389 }
390
391 static int try_recover(struct userdata *u, const char *call, int err) {
392     pa_assert(u);
393     pa_assert(call);
394     pa_assert(err < 0);
395
396     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
397
398     pa_assert(err != -EAGAIN);
399
400     if (err == -EPIPE)
401         pa_log_debug("%s: Buffer overrun!", call);
402
403     if (err == -ESTRPIPE)
404         pa_log_debug("%s: System suspended!", call);
405
406     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
407         pa_log("%s: %s", call, pa_alsa_strerror(err));
408         return -1;
409     }
410
411     u->first = TRUE;
412     return 0;
413 }
414
/* Given n_bytes currently available for reading, compute how much room
 * (in bytes) is left in the hardware buffer before an overrun, and
 * adapt the tsched watermark accordingly: raise it on (near-)overrun,
 * lower it when we consistently have plenty of headroom and were woken
 * by our own timer (on_timeout). Returns the remaining room in bytes. */
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    pa_bool_t overrun = FALSE;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = FALSE;

            /* We decrease the watermark only if have actually
             * been woken up by a timeout. If something else woke
             * us up it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        /* Neither dangerously full nor comfortably empty: disarm the
         * decrease rate-limit timer so the calm period must start over. */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}
467
/* Read as much as possible from the device using the mmap transfer
 * path and post it to the source. Posts zero-copy memblocks that wrap
 * the DMA area directly. On return *sleep_usec holds how long the
 * caller may sleep before the next read (tsched mode only). Returns 1
 * if any data was moved, 0 if not, negative on unrecoverable error. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How much is available for reading right now? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* In tsched mode, skip the read if we still have lots of
         * headroom and weren't woken by POLLIN. */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* Poll claimed data was ready but avail says otherwise:
             * complain once about the driver, then give up this round. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }


        /* Bound the number of outer iterations so we can't starve the loop. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        /* Inner loop: drain the n_bytes reported above, one mmap area
         * at a time. */
        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
/*             pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                /* EAGAIN after we already read something this round
                 * just means "no more right now". */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the DMA area in a fixed (read-only, zero-copy) memblock. */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* Tell the caller how long it may sleep: time until the remaining
     * room runs out, minus the watermark margin. */
    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
629
/* Read as much as possible from the device using plain snd_pcm_readi()
 * (non-mmap transfer path) and post it to the source. Mirrors
 * mmap_read(): on return *sleep_usec holds how long the caller may
 * sleep (tsched mode only). Returns 1 if any data was moved, 0 if not,
 * negative on unrecoverable error. */
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    int work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How much is available for reading right now? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* In tsched mode, skip the read if we still have lots of
         * headroom and weren't woken by POLLIN. */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* Poll claimed data was ready but avail says otherwise:
             * complain once about the driver, then give up this round. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Bound the number of outer iterations so we can't starve the loop. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

        /* Inner loop: drain the n_bytes reported above, one memblock
         * at a time. */
        for (;;) {
            void *p;
            snd_pcm_sframes_t frames;
            pa_memchunk chunk;

            /* (size_t) -1 asks the pool for its default block size. */
            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

/*             pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                /* EAGAIN after we already read something this round
                 * just means "no more right now". */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);
                break;
            }

            pa_assert(frames > 0);
            after_avail = FALSE;

            chunk.index = 0;
            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

/*             pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* Tell the caller how long it may sleep: time until the remaining
     * room runs out, minus the watermark margin. */
    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
759
/* Feed a fresh (system time, device time) pair into the time smoother
 * used by source_get_latency(). Updates are rate-limited by
 * smoother_interval, which doubles after every update up to
 * SMOOTHER_MAX_INTERVAL. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver's own timestamp for "now"; fall back to the
     * rtclock below if it isn't available. */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Device-side time = bytes read so far plus what is still sitting
     * in the hardware buffer (delay). */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
805
806 static pa_usec_t source_get_latency(struct userdata *u) {
807     int64_t delay;
808     pa_usec_t now1, now2;
809
810     pa_assert(u);
811
812     now1 = pa_rtclock_now();
813     now2 = pa_smoother_get(u->smoother, now1);
814
815     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
816
817     return delay >= 0 ? (pa_usec_t) delay : 0;
818 }
819
820 static int build_pollfd(struct userdata *u) {
821     pa_assert(u);
822     pa_assert(u->pcm_handle);
823
824     if (u->alsa_rtpoll_item)
825         pa_rtpoll_item_free(u->alsa_rtpoll_item);
826
827     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
828         return -1;
829
830     return 0;
831 }
832
833 /* Called from IO context */
834 static int suspend(struct userdata *u) {
835     pa_assert(u);
836     pa_assert(u->pcm_handle);
837
838     pa_smoother_pause(u->smoother, pa_rtclock_now());
839
840     /* Let's suspend */
841     snd_pcm_close(u->pcm_handle);
842     u->pcm_handle = NULL;
843
844     if (u->alsa_rtpoll_item) {
845         pa_rtpoll_item_free(u->alsa_rtpoll_item);
846         u->alsa_rtpoll_item = NULL;
847     }
848
849     pa_log_info("Device suspended...");
850
851     return 0;
852 }
853
/* Called from IO Context */
/* Recompute hwbuf_unused from the currently requested latency and push
 * matching software parameters (avail_min) to ALSA. Returns 0 on
 * success, a negative ALSA error code on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency is left unused. */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* hwbuf_unused changed, so the derived limits must be redone. */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* In tsched mode, only wake us once a full sleep period of
         * data has accumulated. */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}
906
/* Called from IO Context on unsuspend or from main thread when creating source */
/* Re-derive the watermark (given in bytes of sample spec *ss) and all
 * dependent step/threshold values for the source's own sample spec,
 * then (re)apply the latency range. */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            pa_bool_t in_thread)
{
    /* Convert the watermark from the ss domain into the source's
     * sample spec, rounding up both ways. */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->source->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_source_set_latency_range_within_thread(u->source,
                                                  u->min_latency_ref,
                                                  pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_source_set_latency_within_thread,
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->source->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
}
941
/* Called from IO context.
 *
 * Reopens the ALSA capture device after a suspend and verifies that the
 * device gives us back exactly the configuration we had before (access
 * mode, sample spec, fragment/buffer sizes). If any of those cannot be
 * restored, resuming fails, the device is closed again and -PA_ERR_IO is
 * returned; on success the smoother and watermark state are reset and 0
 * is returned. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Open non-blocking and with all automatic alsa-lib conversions
     * disabled, so we negotiate with the hardware configuration directly */
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Request the pre-suspend configuration... */
    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* ...and refuse to resume if the device couldn't reproduce it */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Timing starts from scratch: reset the latency smoother state */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    /* Make the IO thread restart the capture stream on its next iteration */
    u->first = TRUE;

    /* reset the watermark to the value defined when source was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1023
/* Called from IO context.
 *
 * Message handler for the source object: answers latency queries and
 * performs the device-side work (suspend/unsuspend, pollfd setup) for
 * state transitions before delegating to the generic implementation. */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While the device is closed (suspended) we report zero latency */
            if (u->pcm_handle)
                r = source_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_STATE:

            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SOURCE_SUSPENDED: {
                    int r;

                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING: {
                    int r;

                    /* First transition out of INIT: wire up the poll fds */
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    /* Coming back from suspend: reopen and reconfigure */
                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    ;
            }

            break;
    }

    /* Everything else is handled by the generic source implementation */
    return pa_source_process_msg(o, code, data, offset, chunk);
}
1084
1085 /* Called from main context */
1086 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1087     pa_source_state_t old_state;
1088     struct userdata *u;
1089
1090     pa_source_assert_ref(s);
1091     pa_assert_se(u = s->userdata);
1092
1093     old_state = pa_source_get_state(u->source);
1094
1095     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1096         reserve_done(u);
1097     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1098         if (reserve_init(u, u->device_name) < 0)
1099             return -PA_ERR_BUSY;
1100
1101     return 0;
1102 }
1103
1104 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1105     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1106
1107     pa_assert(u);
1108     pa_assert(u->mixer_handle);
1109
1110     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1111         return 0;
1112
1113     if (!PA_SOURCE_IS_LINKED(u->source->state))
1114         return 0;
1115
1116     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1117         return 0;
1118
1119     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1120         pa_source_get_volume(u->source, TRUE);
1121         pa_source_get_mute(u->source, TRUE);
1122     }
1123
1124     return 0;
1125 }
1126
1127 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1128     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1129
1130     pa_assert(u);
1131     pa_assert(u->mixer_handle);
1132
1133     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1134         return 0;
1135
1136     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1137         return 0;
1138
1139     if (mask & SND_CTL_EVENT_MASK_VALUE)
1140         pa_source_update_volume_and_mute(u->source);
1141
1142     return 0;
1143 }
1144
/* Called from main context.
 *
 * Reads the current hardware volume from the mixer path. If it differs
 * from what we last saw, adopts it as the new real volume and — on
 * dB-capable paths — resets the software volume, since the hardware was
 * changed behind our back. */
static void source_get_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    /* Unchanged since last read: nothing to do */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_source_set_soft_volume(s, NULL);
}
1177
/* Called from main context.
 *
 * Writes the requested real volume to the hardware mixer. On dB-capable
 * paths the remainder that the hardware could not apply exactly is
 * compensated in software (unless the difference is within
 * VOLUME_ACCURACY); on non-dB paths real_volume is clamped to whatever
 * the hardware actually accepted. */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    /* 'r' now holds what the hardware actually applied */
    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1235
/* Called from the IO thread (deferred volume only).
 *
 * Writes the pending hardware volume (thread_info.current_hw_volume) to
 * the mixer, and logs a debug message if what the hardware accepted
 * deviates from the request by more than VOLUME_ACCURACY. */
static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Compare what was applied against what was requested */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            /* union keeps the stack footprint of the four snprint
             * buffers down — they are only used sequentially */
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug("                                           in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}
1277
1278 static void source_get_mute_cb(pa_source *s) {
1279     struct userdata *u = s->userdata;
1280     pa_bool_t b;
1281
1282     pa_assert(u);
1283     pa_assert(u->mixer_path);
1284     pa_assert(u->mixer_handle);
1285
1286     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1287         return;
1288
1289     s->muted = b;
1290 }
1291
1292 static void source_set_mute_cb(pa_source *s) {
1293     struct userdata *u = s->userdata;
1294
1295     pa_assert(u);
1296     pa_assert(u->mixer_path);
1297     pa_assert(u->mixer_handle);
1298
1299     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1300 }
1301
/* Registers (or clears) the volume and mute callbacks on the source
 * according to the capabilities of the currently active mixer path, and
 * derives base_volume / n_volume_steps from the path's dB or raw range.
 * Must be re-run whenever the active path changes (see source_set_port_cb). */
static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        /* No hardware volume on this path: clear all volume callbacks */
        pa_source_set_write_volume_callback(u->source, NULL);
        pa_source_set_get_volume_callback(u->source, NULL);
        pa_source_set_set_volume_callback(u->source, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
        pa_source_set_set_volume_callback(u->source, source_set_volume_cb);

        /* Deferred (IO-thread) volume writing needs a dB-capable path */
        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_source_set_write_volume_callback(u->source, NULL);

        if (u->mixer_path->has_dB) {
            pa_source_enable_decibel_volume(u->source, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* base_volume maps 0dB to PA_VOLUME_NORM */
            u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->source->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
        } else {
            pa_source_enable_decibel_volume(u->source, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->source->base_volume = PA_VOLUME_NORM;
            u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_source_set_get_mute_callback(u->source, NULL);
        pa_source_set_set_mute_callback(u->source, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
        pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
1350
/* Called from main context.
 *
 * Activates the mixer path belonging to the newly selected port,
 * re-registers the volume/mute callbacks for the new path's capabilities
 * and re-applies the current volume and mute state through it. */
static int source_set_port_cb(pa_source *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    /* The new path may have different volume/mute support */
    mixer_volume_init(u);

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    /* Re-apply the current state through the newly selected path;
     * deferred-volume sources write from the IO thread via write_volume */
    if (s->set_mute)
        s->set_mute(s);
    if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
        if (s->write_volume)
            s->write_volume(s);
    } else {
        if (s->set_volume)
            s->set_volume(s);
    }

    return 0;
}
1381
1382 static void source_update_requested_latency_cb(pa_source *s) {
1383     struct userdata *u = s->userdata;
1384     pa_assert(u);
1385     pa_assert(u->use_tsched); /* only when timer scheduling is used
1386                                * we can dynamically adjust the
1387                                * latency */
1388
1389     if (!u->pcm_handle)
1390         return;
1391
1392     update_sw_params(u);
1393 }
1394
1395 static pa_bool_t source_update_rate_cb(pa_source *s, uint32_t rate)
1396 {
1397     struct userdata *u = s->userdata;
1398     int i;
1399     pa_bool_t supported = FALSE;
1400
1401     pa_assert(u);
1402
1403     for (i = 0; u->rates[i]; i++) {
1404         if (u->rates[i] == rate) {
1405             supported = TRUE;
1406             break;
1407         }
1408     }
1409
1410     if (!supported) {
1411         pa_log_info("Sink does not support sample rate of %d Hz", rate);
1412         return FALSE;
1413     }
1414
1415     if (!PA_SOURCE_IS_OPENED(s->state)) {
1416         pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1417         u->source->sample_spec.rate = rate;
1418         return TRUE;
1419     }
1420
1421     return FALSE;
1422 }
1423
/* The source's realtime IO thread: reads audio from the device, feeds
 * the latency smoother, programs the next wakeup for timer-based
 * scheduling and dispatches thread messages via the rtpoll loop. Exits
 * either cleanly on shutdown or by requesting its own module unload on
 * unrecoverable errors. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Read some data and pass it to the sources */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* u->first is set on creation and after every resume/recovery:
             * (re)start the capture stream and the smoother */
            if (u->first) {
                pa_log_info("Starting capture.");
                snd_pcm_start(u->pcm_handle);

                pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                u->first = FALSE;
            }

            if (u->use_mmap)
                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
            else
                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);

            if (work_done < 0)
                goto fail;

/*             pa_log_debug("work_done = %i", work_done); */

            if (work_done)
                update_smoother(u);

            if (u->use_tsched) {
                pa_usec_t cusec;

                /* OK, the capture buffer is now empty, let's
                 * calculate when to wake up next */

/*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

/*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }
        }

        /* With deferred volume, pending hardware volume writes may need
         * an earlier wakeup than the capture schedule does */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_source_volume_change_apply(u->source, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0)
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
            pa_source_volume_change_apply(u->source, NULL);

        /* pa_rtpoll_run() returning 0 means the quit message arrived */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Any event other than "data readable" signals an error
             * condition (e.g. overrun) — try to recover and restart */
            if (revents & ~POLLIN) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1553
1554 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1555     const char *n;
1556     char *t;
1557
1558     pa_assert(data);
1559     pa_assert(ma);
1560     pa_assert(device_name);
1561
1562     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1563         pa_source_new_data_set_name(data, n);
1564         data->namereg_fail = TRUE;
1565         return;
1566     }
1567
1568     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1569         data->namereg_fail = TRUE;
1570     else {
1571         n = device_id ? device_id : device_name;
1572         data->namereg_fail = FALSE;
1573     }
1574
1575     if (mapping)
1576         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1577     else
1578         t = pa_sprintf_malloc("alsa_input.%s", n);
1579
1580     pa_source_new_data_set_name(data, t);
1581     pa_xfree(t);
1582 }
1583
/* Locates and probes a mixer for the PCM device. On success
 * u->mixer_handle plus either u->mixer_path (explicit control_element)
 * or u->mixer_path_set (paths supplied by the mapping) are set up. On
 * any failure both are torn down again, leaving the source on software
 * volume/mute. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
    snd_hctl_t *hctl;

    /* Neither a mapping nor an explicit element: no mixer wanted */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {
        /* An explicit element overrides the mapping's path set */

        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else if (!(u->mixer_path_set = mapping->input_path_set))
        goto fail;

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1622
/* Activates the appropriate mixer path (the active port's path, or the
 * single/first available one), installs the volume/mute callbacks and —
 * when any path offers hardware volume or mute — registers mixer event
 * monitoring either on the IO thread rtpoll (deferred volume) or on the
 * main loop. Returns 0 on success (including "no mixer"), -1 on failure
 * to set up event monitoring. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    /* find_mixer() found nothing: software volume/mute only */
    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        /* Deferred volume is handled on the IO thread, so mixer events
         * must be polled there too; otherwise use the main loop fdlist */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1704
1705 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1706
1707     struct userdata *u = NULL;
1708     const char *dev_id = NULL;
1709     pa_sample_spec ss;
1710     uint32_t alternate_sample_rate;
1711     pa_channel_map map;
1712     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1713     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1714     size_t frame_size;
1715     pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, fixed_latency_range = FALSE;
1716     pa_source_new_data data;
1717     pa_alsa_profile_set *profile_set = NULL;
1718
1719     pa_assert(m);
1720     pa_assert(ma);
1721
1722     ss = m->core->default_sample_spec;
1723     map = m->core->default_channel_map;
1724     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1725         pa_log("Failed to parse sample specification and channel map");
1726         goto fail;
1727     }
1728
1729     alternate_sample_rate = m->core->alternate_sample_rate;
1730     if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1731         pa_log("Failed to parse alternate sample rate");
1732         goto fail;
1733     }
1734
1735     frame_size = pa_frame_size(&ss);
1736
1737     nfrags = m->core->default_n_fragments;
1738     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1739     if (frag_size <= 0)
1740         frag_size = (uint32_t) frame_size;
1741     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1742     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1743
1744     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1745         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1746         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1747         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1748         pa_log("Failed to parse buffer metrics");
1749         goto fail;
1750     }
1751
1752     buffer_size = nfrags * frag_size;
1753
1754     period_frames = frag_size/frame_size;
1755     buffer_frames = buffer_size/frame_size;
1756     tsched_frames = tsched_size/frame_size;
1757
1758     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1759         pa_log("Failed to parse mmap argument.");
1760         goto fail;
1761     }
1762
1763     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1764         pa_log("Failed to parse tsched argument.");
1765         goto fail;
1766     }
1767
1768     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1769         pa_log("Failed to parse ignore_dB argument.");
1770         goto fail;
1771     }
1772
1773     deferred_volume = m->core->deferred_volume;
1774     if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1775         pa_log("Failed to parse deferred_volume argument.");
1776         goto fail;
1777     }
1778
1779     if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
1780         pa_log("Failed to parse fixed_latency_range argument.");
1781         goto fail;
1782     }
1783
1784     use_tsched = pa_alsa_may_tsched(use_tsched);
1785
1786     u = pa_xnew0(struct userdata, 1);
1787     u->core = m->core;
1788     u->module = m;
1789     u->use_mmap = use_mmap;
1790     u->use_tsched = use_tsched;
1791     u->deferred_volume = deferred_volume;
1792     u->fixed_latency_range = fixed_latency_range;
1793     u->first = TRUE;
1794     u->rtpoll = pa_rtpoll_new();
1795     pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1796
1797     u->smoother = pa_smoother_new(
1798             SMOOTHER_ADJUST_USEC,
1799             SMOOTHER_WINDOW_USEC,
1800             TRUE,
1801             TRUE,
1802             5,
1803             pa_rtclock_now(),
1804             TRUE);
1805     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1806
1807     dev_id = pa_modargs_get_value(
1808             ma, "device_id",
1809             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1810
1811     u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
1812
1813     if (reserve_init(u, dev_id) < 0)
1814         goto fail;
1815
1816     if (reserve_monitor_init(u, dev_id) < 0)
1817         goto fail;
1818
1819     b = use_mmap;
1820     d = use_tsched;
1821
1822     if (mapping) {
1823
1824         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1825             pa_log("device_id= not set");
1826             goto fail;
1827         }
1828
1829         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1830                       dev_id,
1831                       &u->device_name,
1832                       &ss, &map,
1833                       SND_PCM_STREAM_CAPTURE,
1834                       &period_frames, &buffer_frames, tsched_frames,
1835                       &b, &d, mapping)))
1836             goto fail;
1837
1838     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1839
1840         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1841             goto fail;
1842
1843         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1844                       dev_id,
1845                       &u->device_name,
1846                       &ss, &map,
1847                       SND_PCM_STREAM_CAPTURE,
1848                       &period_frames, &buffer_frames, tsched_frames,
1849                       &b, &d, profile_set, &mapping)))
1850             goto fail;
1851
1852     } else {
1853
1854         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1855                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1856                       &u->device_name,
1857                       &ss, &map,
1858                       SND_PCM_STREAM_CAPTURE,
1859                       &period_frames, &buffer_frames, tsched_frames,
1860                       &b, &d, FALSE)))
1861             goto fail;
1862     }
1863
1864     pa_assert(u->device_name);
1865     pa_log_info("Successfully opened device %s.", u->device_name);
1866
1867     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1868         pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1869         goto fail;
1870     }
1871
1872     if (mapping)
1873         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1874
1875     if (use_mmap && !b) {
1876         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1877         u->use_mmap = use_mmap = FALSE;
1878     }
1879
1880     if (use_tsched && (!b || !d)) {
1881         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1882         u->use_tsched = use_tsched = FALSE;
1883     }
1884
1885     if (u->use_mmap)
1886         pa_log_info("Successfully enabled mmap() mode.");
1887
1888     if (u->use_tsched) {
1889         pa_log_info("Successfully enabled timer-based scheduling mode.");
1890         if (u->fixed_latency_range)
1891             pa_log_info("Disabling latency range changes on overrun");
1892     }
1893
1894     u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
1895     if (!u->rates) {
1896         pa_log_error("Failed to find any supported sample rates.");
1897         goto fail;
1898     }
1899
1900     /* ALSA might tweak the sample spec, so recalculate the frame size */
1901     frame_size = pa_frame_size(&ss);
1902
1903     find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1904
1905     pa_source_new_data_init(&data);
1906     data.driver = driver;
1907     data.module = m;
1908     data.card = card;
1909     set_source_name(&data, ma, dev_id, u->device_name, mapping);
1910
1911     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1912      * variable instead of using &data.namereg_fail directly, because
1913      * data.namereg_fail is a bitfield and taking the address of a bitfield
1914      * variable is impossible. */
1915     namereg_fail = data.namereg_fail;
1916     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1917         pa_log("Failed to parse namereg_fail argument.");
1918         pa_source_new_data_done(&data);
1919         goto fail;
1920     }
1921     data.namereg_fail = namereg_fail;
1922
1923     pa_source_new_data_set_sample_spec(&data, &ss);
1924     pa_source_new_data_set_channel_map(&data, &map);
1925     pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
1926
1927     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1928     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1929     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1930     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1931     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1932
1933     if (mapping) {
1934         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1935         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1936     }
1937
1938     pa_alsa_init_description(data.proplist);
1939
1940     if (u->control_device)
1941         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1942
1943     if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1944         pa_log("Invalid properties");
1945         pa_source_new_data_done(&data);
1946         goto fail;
1947     }
1948
1949     if (u->mixer_path_set)
1950         pa_alsa_add_ports(&data.ports, u->mixer_path_set, card);
1951
1952     u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1953     pa_source_new_data_done(&data);
1954
1955     if (!u->source) {
1956         pa_log("Failed to create source object");
1957         goto fail;
1958     }
1959
1960     if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
1961                                  &u->source->thread_info.volume_change_safety_margin) < 0) {
1962         pa_log("Failed to parse deferred_volume_safety_margin parameter");
1963         goto fail;
1964     }
1965
1966     if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
1967                                  &u->source->thread_info.volume_change_extra_delay) < 0) {
1968         pa_log("Failed to parse deferred_volume_extra_delay parameter");
1969         goto fail;
1970     }
1971
1972     u->source->parent.process_msg = source_process_msg;
1973     if (u->use_tsched)
1974         u->source->update_requested_latency = source_update_requested_latency_cb;
1975     u->source->set_state = source_set_state_cb;
1976     u->source->set_port = source_set_port_cb;
1977     if (u->source->alternate_sample_rate)
1978         u->source->update_rate = source_update_rate_cb;
1979     u->source->userdata = u;
1980
1981     pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1982     pa_source_set_rtpoll(u->source, u->rtpoll);
1983
1984     u->frame_size = frame_size;
1985     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1986     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1987     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1988
1989     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1990                 (double) u->hwbuf_size / (double) u->fragment_size,
1991                 (long unsigned) u->fragment_size,
1992                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1993                 (long unsigned) u->hwbuf_size,
1994                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1995
1996     if (u->use_tsched) {
1997         u->tsched_watermark_ref = tsched_watermark;
1998         reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
1999     }
2000     else
2001         pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
2002
2003     reserve_update(u);
2004
2005     if (update_sw_params(u) < 0)
2006         goto fail;
2007
2008     if (setup_mixer(u, ignore_dB) < 0)
2009         goto fail;
2010
2011     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2012
2013     if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
2014         pa_log("Failed to create thread.");
2015         goto fail;
2016     }
2017
2018     /* Get initial mixer settings */
2019     if (data.volume_is_set) {
2020         if (u->source->set_volume)
2021             u->source->set_volume(u->source);
2022     } else {
2023         if (u->source->get_volume)
2024             u->source->get_volume(u->source);
2025     }
2026
2027     if (data.muted_is_set) {
2028         if (u->source->set_mute)
2029             u->source->set_mute(u->source);
2030     } else {
2031         if (u->source->get_mute)
2032             u->source->get_mute(u->source);
2033     }
2034
2035     if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
2036         u->source->write_volume(u->source);
2037
2038     pa_source_put(u->source);
2039
2040     if (profile_set)
2041         pa_alsa_profile_set_free(profile_set);
2042
2043     return u->source;
2044
2045 fail:
2046
2047     if (u)
2048         userdata_free(u);
2049
2050     if (profile_set)
2051         pa_alsa_profile_set_free(profile_set);
2052
2053     return NULL;
2054 }
2055
2056 static void userdata_free(struct userdata *u) {
2057     pa_assert(u);
2058
2059     if (u->source)
2060         pa_source_unlink(u->source);
2061
2062     if (u->thread) {
2063         pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2064         pa_thread_free(u->thread);
2065     }
2066
2067     pa_thread_mq_done(&u->thread_mq);
2068
2069     if (u->source)
2070         pa_source_unref(u->source);
2071
2072     if (u->mixer_pd)
2073         pa_alsa_mixer_pdata_free(u->mixer_pd);
2074
2075     if (u->alsa_rtpoll_item)
2076         pa_rtpoll_item_free(u->alsa_rtpoll_item);
2077
2078     if (u->rtpoll)
2079         pa_rtpoll_free(u->rtpoll);
2080
2081     if (u->pcm_handle) {
2082         snd_pcm_drop(u->pcm_handle);
2083         snd_pcm_close(u->pcm_handle);
2084     }
2085
2086     if (u->mixer_fdl)
2087         pa_alsa_fdlist_free(u->mixer_fdl);
2088
2089     if (u->mixer_path && !u->mixer_path_set)
2090         pa_alsa_path_free(u->mixer_path);
2091
2092     if (u->mixer_handle)
2093         snd_mixer_close(u->mixer_handle);
2094
2095     if (u->smoother)
2096         pa_smoother_free(u->smoother);
2097
2098     if (u->rates)
2099         pa_xfree(u->rates);
2100
2101     reserve_done(u);
2102     monitor_done(u);
2103
2104     pa_xfree(u->device_name);
2105     pa_xfree(u->control_device);
2106     pa_xfree(u->paths_dir);
2107     pa_xfree(u);
2108 }
2109
2110 void pa_alsa_source_free(pa_source *s) {
2111     struct userdata *u;
2112
2113     pa_source_assert_ref(s);
2114     pa_assert_se(u = s->userdata);
2115
2116     userdata_free(u);
2117 }