sink, source: Rework reconfiguration logic to apply to more than rate
[platform/upstream/pulseaudio.git] / src / modules / alsa / alsa-source.c
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
19 ***/
20
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24
25 #include <signal.h>
26 #include <stdio.h>
27
28 #include <asoundlib.h>
29
30 #include <pulse/rtclock.h>
31 #include <pulse/timeval.h>
32 #include <pulse/volume.h>
33 #include <pulse/xmalloc.h>
34
35 #include <pulsecore/core.h>
36 #include <pulsecore/i18n.h>
37 #include <pulsecore/module.h>
38 #include <pulsecore/memchunk.h>
39 #include <pulsecore/sink.h>
40 #include <pulsecore/modargs.h>
41 #include <pulsecore/core-rtclock.h>
42 #include <pulsecore/core-util.h>
43 #include <pulsecore/sample-util.h>
44 #include <pulsecore/log.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/thread.h>
47 #include <pulsecore/thread-mq.h>
48 #include <pulsecore/rtpoll.h>
49 #include <pulsecore/time-smoother.h>
50
51 #include <modules/reserve-wrap.h>
52
53 #include "alsa-util.h"
54 #include "alsa-source.h"
55
56 /* #define DEBUG_TIMING */
57
58 #define DEFAULT_DEVICE "default"
59
60 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
61 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
62
63 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
64 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
65 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
66 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
67 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
68 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
69
70 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
71 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
72
73 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
74 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
75
76 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
77 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
78
79 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
80
/* Per-source instance state, shared between the main thread and the IO
 * thread. Allocated on module load, freed by userdata_free(). */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_source *source;

    /* IO thread and its message queue / poll loop */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;          /* NULL while suspended */

    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    unsigned int *rates;            /* supported sample rates, 0-terminated */

    /* All sizes below are in bytes */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold;

    snd_pcm_uframes_t frames_per_block;

    /* Timer-scheduling bookkeeping */
    pa_usec_t watermark_dec_not_before;
    pa_usec_t min_latency_ref;
    pa_usec_t tsched_watermark_usec;

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    bool first;                     /* true until the first successful read after (un)suspend */

    pa_rtpoll_item *alsa_rtpoll_item;

    /* Latency smoother state */
    pa_smoother *smoother;
    uint64_t read_count;            /* total bytes read from the device */
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    /* Device reservation (session-level device sharing) */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};
145
146 static void userdata_free(struct userdata *u);
147
148 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
149     pa_assert(r);
150     pa_assert(u);
151
152     pa_log_debug("Suspending source %s, because another application requested us to release the device.", u->source->name);
153
154     if (pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION) < 0)
155         return PA_HOOK_CANCEL;
156
157     return PA_HOOK_OK;
158 }
159
160 static void reserve_done(struct userdata *u) {
161     pa_assert(u);
162
163     if (u->reserve_slot) {
164         pa_hook_slot_free(u->reserve_slot);
165         u->reserve_slot = NULL;
166     }
167
168     if (u->reserve) {
169         pa_reserve_wrapper_unref(u->reserve);
170         u->reserve = NULL;
171     }
172 }
173
174 static void reserve_update(struct userdata *u) {
175     const char *description;
176     pa_assert(u);
177
178     if (!u->source || !u->reserve)
179         return;
180
181     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
182         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
183 }
184
185 static int reserve_init(struct userdata *u, const char *dname) {
186     char *rname;
187
188     pa_assert(u);
189     pa_assert(dname);
190
191     if (u->reserve)
192         return 0;
193
194     if (pa_in_system_mode())
195         return 0;
196
197     if (!(rname = pa_alsa_get_reserve_name(dname)))
198         return 0;
199
200     /* We are resuming, try to lock the device */
201     u->reserve = pa_reserve_wrapper_get(u->core, rname);
202     pa_xfree(rname);
203
204     if (!(u->reserve))
205         return -1;
206
207     reserve_update(u);
208
209     pa_assert(!u->reserve_slot);
210     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
211
212     return 0;
213 }
214
215 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
216     pa_assert(w);
217     pa_assert(u);
218
219     if (PA_PTR_TO_UINT(busy) && !u->reserve) {
220         pa_log_debug("Suspending source %s, because another application is blocking the access to the device.", u->source->name);
221         pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION);
222     } else {
223         pa_log_debug("Resuming source %s, because other applications aren't blocking access to the device any more.", u->source->name);
224         pa_source_suspend(u->source, false, PA_SUSPEND_APPLICATION);
225     }
226
227     return PA_HOOK_OK;
228 }
229
230 static void monitor_done(struct userdata *u) {
231     pa_assert(u);
232
233     if (u->monitor_slot) {
234         pa_hook_slot_free(u->monitor_slot);
235         u->monitor_slot = NULL;
236     }
237
238     if (u->monitor) {
239         pa_reserve_monitor_wrapper_unref(u->monitor);
240         u->monitor = NULL;
241     }
242 }
243
244 static int reserve_monitor_init(struct userdata *u, const char *dname) {
245     char *rname;
246
247     pa_assert(u);
248     pa_assert(dname);
249
250     if (pa_in_system_mode())
251         return 0;
252
253     if (!(rname = pa_alsa_get_reserve_name(dname)))
254         return 0;
255
256     /* We are resuming, try to lock the device */
257     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
258     pa_xfree(rname);
259
260     if (!(u->monitor))
261         return -1;
262
263     pa_assert(!u->monitor_slot);
264     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
265
266     return 0;
267 }
268
/* Recompute the minimum sleep and wakeup margins (in bytes) from the
 * TSCHED_MIN_*_USEC constants, clamping each to at least one frame and at
 * most half of the usable hardware buffer. Timer-scheduling mode only. */
static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* Usable buffer = hardware buffer minus the part deliberately left
     * unused to honour a requested latency (see update_sw_params()) */
    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}
284
/* Clamp the timer-scheduling watermark (bytes) into the window
 * [min_wakeup, max_use - min_sleep] and refresh its cached usec value.
 * fix_min_sleep_wakeup() must have run first so min_sleep <= max_use/2. */
static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;

   u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
}
300
/* React to an overrun (or too little room left) by raising the wakeup
 * watermark; if the watermark is already at its ceiling, raise the source's
 * minimum latency instead (unless a fixed latency range was configured). */
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    /* Double it, but never grow by more than watermark_inc_step at once */
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
     raise the latency unless doing so was disabled in
     configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->source->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
    }

    /* If we reach this point without having changed anything, we have
     * exhausted every option and overruns can no longer be avoided. */
}
338
339 static void decrease_watermark(struct userdata *u) {
340     size_t old_watermark;
341     pa_usec_t now;
342
343     pa_assert(u);
344     pa_assert(u->use_tsched);
345
346     now = pa_rtclock_now();
347
348     if (u->watermark_dec_not_before <= 0)
349         goto restart;
350
351     if (u->watermark_dec_not_before > now)
352         return;
353
354     old_watermark = u->tsched_watermark;
355
356     if (u->tsched_watermark < u->watermark_dec_step)
357         u->tsched_watermark = u->tsched_watermark / 2;
358     else
359         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
360
361     fix_tsched_watermark(u);
362
363     if (old_watermark != u->tsched_watermark)
364         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
365                     (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
366
367     /* We don't change the latency range*/
368
369 restart:
370     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
371 }
372
/* Split the effective latency budget into a sleep part and a process part:
 * we sleep for (latency - watermark) and reserve the watermark's worth of
 * time for actually reading the data. Timer-scheduling mode only. */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
    pa_usec_t wm, usec;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_source_get_requested_latency_within_thread(u->source);

    /* No client requested a latency: budget the whole hardware buffer */
    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);

    wm = u->tsched_watermark_usec;

    /* Watermark must never exceed the budget; cap it at half */
    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
402
403 static int try_recover(struct userdata *u, const char *call, int err) {
404     pa_assert(u);
405     pa_assert(call);
406     pa_assert(err < 0);
407
408     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
409
410     pa_assert(err != -EAGAIN);
411
412     if (err == -EPIPE)
413         pa_log_debug("%s: Buffer overrun!", call);
414
415     if (err == -ESTRPIPE)
416         pa_log_debug("%s: System suspended!", call);
417
418     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
419         pa_log("%s: %s", call, pa_alsa_strerror(err));
420         return -1;
421     }
422
423     u->first = true;
424     return 0;
425 }
426
/* Given `n_bytes` of freshly available data, compute how much free space
 * remains in the hardware buffer, detect overruns, and drive the adaptive
 * watermark up or down accordingly. `on_timeout` is true only when this
 * wakeup came from our own timer (a fair deadline measurement). */
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, bool on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    bool overrun = false;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = true;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        bool reset_not_before = true;

        /* Too close to an overrun (or past one): react immediately */
        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = false;

            /* We decrease the watermark only if have actually
             * been woken up by a timeout. If something else woke
             * us up it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        /* Comfortable zone: restart the decrease verification window */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}
479
/* Read as much captured audio as possible from the device using mmap
 * transfer and post it to the source. On return, *sleep_usec holds how
 * long the IO thread may sleep before the next read (tsched mode only).
 * `polled` is true when poll() reported POLLIN; `on_timeout` when our
 * own timer fired. Returns 1 if any data was moved, 0 if not, negative
 * on unrecoverable error. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    bool work_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    /* Outer loop: re-query avail after each full transfer round */
    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = false;

        /* Plenty of room left and nobody forced us awake: defer the read */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing available indicates a driver bug; warn once */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read.\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }

        /* Bound the number of outer iterations so we never starve the loop */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = false;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        /* Inner loop: drain the n_bytes we know are available */
        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
/*             pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                /* EAGAIN right after a successful read just means "drained" */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            frames = PA_MIN(frames, u->frames_per_block);

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = false;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'ed region in a fixed (read-only) memblock and
             * hand it to the source without copying */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, true);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = true;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the remaining free space shrinks to the watermark */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
639
/* Read captured audio via classic snd_pcm_readi() (non-mmap fallback) and
 * post it to the source. Same contract as mmap_read(): fills *sleep_usec
 * in tsched mode, returns 1 if data was moved, 0 if not, negative on
 * unrecoverable error. */
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    int work_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    /* Outer loop: re-query avail after each full transfer round */
    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = false;

        /* Plenty of room left and nobody forced us awake: defer the read */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing available indicates a driver bug; warn once */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Bound the number of outer iterations so we never starve the loop */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = false;

        /* Inner loop: copy the available bytes out in memblock-sized chunks */
        for (;;) {
            void *p;
            snd_pcm_sframes_t frames;
            pa_memchunk chunk;

            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

/*             pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                /* EAGAIN right after a successful read just means "drained" */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);
                break;
            }

            pa_assert(frames > 0);
            after_avail = false;

            chunk.index = 0;
            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            work_done = true;

            u->read_count += frames * u->frame_size;

/*             pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the remaining free space shrinks to the watermark */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
769
/* Feed the latency smoother a fresh (system time, stream position) sample
 * pair derived from the device delay, rate-limited by smoother_interval
 * which backs off exponentially up to SMOOTHER_MAX_INTERVAL. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;
    snd_htimestamp_t htstamp = { 0, 0 };

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->source->sample_spec, true)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    snd_pcm_status_get_htstamp(status, &htstamp);
    now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Stream position = bytes already read + what still sits in the device */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
811
812 static int64_t source_get_latency(struct userdata *u) {
813     int64_t delay;
814     pa_usec_t now1, now2;
815
816     pa_assert(u);
817
818     now1 = pa_rtclock_now();
819     now2 = pa_smoother_get(u->smoother, now1);
820
821     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
822
823     return delay;
824 }
825
826 static int build_pollfd(struct userdata *u) {
827     pa_assert(u);
828     pa_assert(u->pcm_handle);
829
830     if (u->alsa_rtpoll_item)
831         pa_rtpoll_item_free(u->alsa_rtpoll_item);
832
833     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
834         return -1;
835
836     return 0;
837 }
838
839 /* Called from IO context */
840 static int suspend(struct userdata *u) {
841     pa_assert(u);
842     pa_assert(u->pcm_handle);
843
844     pa_smoother_pause(u->smoother, pa_rtclock_now());
845
846     /* Let's suspend */
847     snd_pcm_close(u->pcm_handle);
848     u->pcm_handle = NULL;
849
850     if (u->alsa_rtpoll_item) {
851         pa_rtpoll_item_free(u->alsa_rtpoll_item);
852         u->alsa_rtpoll_item = NULL;
853     }
854
855     pa_log_info("Device suspended...");
856
857     return 0;
858 }
859
860 /* Called from IO context */
/* Called from IO context */
/* Recompute hwbuf_unused from the currently requested latency, refresh the
 * tsched margins/watermark, and push the resulting avail_min to ALSA's
 * software parameters. Returns 0 on success, negative ALSA error code. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Leave everything beyond the requested latency unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* In tsched mode the kernel should only wake us after a full
         * sleep period's worth of data has accumulated */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}
912
913 /* Called from IO Context on unsuspend or from main thread when creating source */
/* Called from IO Context on unsuspend or from main thread when creating source */
/* Re-derive all watermark-related byte quantities for the (possibly new)
 * sample spec `ss`, then publish the source's latency range. `in_thread`
 * selects the IO-thread-safe variant of the latency-range setter. */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            bool in_thread) {
    /* Rescale the watermark from the old sample spec to the source's */
    u->tsched_watermark = pa_convert_size(tsched_watermark, ss, &u->source->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_source_set_latency_range_within_thread(u->source,
                                                  u->min_latency_ref,
                                                  pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_source_set_latency_within_thead,
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->source->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
}
945
/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    bool b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Reopen the capture device; automatic conversion plugins are disabled
     * so we get exactly the configuration we negotiated originally. */
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Request the same parameters the source was created with */
    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, true)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* The device must come back with the original access mode, sample spec
     * and buffer geometry; if it can't, resuming is not possible. */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Reset the timing smoother: the old clock correlation is meaningless
     * after a suspend/resume cycle. */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    /* Make the IO thread restart capture on its next iteration */
    u->first = true;

    /* reset the watermark to the value defined when source was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, true);

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1027
/* Called from IO context */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            int64_t r = 0;

            /* Without an open PCM handle there is no meaningful latency;
             * report 0 in that case. */
            if (u->pcm_handle)
                r = source_get_latency(u);

            *((int64_t*) data) = r;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_STATE:

            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SOURCE_SUSPENDED: {
                    int r;

                    /* Suspending is only valid from an opened state */
                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING: {
                    int r;

                    /* First transition out of INIT: set up the poll fds */
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    /* Coming back from suspend: reopen and reconfigure the device */
                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    ;
            }

            break;
    }

    /* Let the generic source implementation handle everything else */
    return pa_source_process_msg(o, code, data, offset, chunk);
}
1088
1089 /* Called from main context */
1090 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1091     pa_source_state_t old_state;
1092     struct userdata *u;
1093
1094     pa_source_assert_ref(s);
1095     pa_assert_se(u = s->userdata);
1096
1097     old_state = pa_source_get_state(u->source);
1098
1099     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1100         reserve_done(u);
1101     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1102         if (reserve_init(u, u->device_name) < 0)
1103             return -PA_ERR_BUSY;
1104
1105     return 0;
1106 }
1107
1108 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1109     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1110
1111     pa_assert(u);
1112     pa_assert(u->mixer_handle);
1113
1114     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1115         return 0;
1116
1117     if (!PA_SOURCE_IS_LINKED(u->source->state))
1118         return 0;
1119
1120     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1121         pa_source_set_mixer_dirty(u->source, true);
1122         return 0;
1123     }
1124
1125     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1126         pa_source_get_volume(u->source, true);
1127         pa_source_get_mute(u->source, true);
1128     }
1129
1130     return 0;
1131 }
1132
1133 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1134     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1135
1136     pa_assert(u);
1137     pa_assert(u->mixer_handle);
1138
1139     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1140         return 0;
1141
1142     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1143         pa_source_set_mixer_dirty(u->source, true);
1144         return 0;
1145     }
1146
1147     if (mask & SND_CTL_EVENT_MASK_VALUE)
1148         pa_source_update_volume_and_mute(u->source);
1149
1150     return 0;
1151 }
1152
/* Read the hardware volume from the mixer and propagate it to the core. */
static void source_get_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Query the current volume on the active mixer path */
    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s",
                 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));

    /* Unchanged since our last read: don't disturb the core */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_source_set_soft_volume(s, NULL);
}
1180
/* Apply s->real_volume to the hardware as closely as possible, computing a
 * residual software volume when the mixer has a dB scale. */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
    bool deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* With deferred volume the actual write happens later from the IO
     * thread; here we only compute what the hardware would be set to. */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        bool accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
        pa_log_debug("Got hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        /* No dB scale: no software correction is possible */
        pa_log_debug("Wrote hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1237
/* Write thread_info.current_hw_volume to the hardware. Only used on the
 * deferred-volume path (PA_SOURCE_DEFERRED_VOLUME asserted below). */
static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        bool accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Compare what we actually got against what was requested; small
         * deviations within VOLUME_ACCURACY are tolerated silently. */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint_verbose(volume_buf[0],
                                                    sizeof(volume_buf[0]),
                                                    &s->thread_info.current_hw_volume,
                                                    &s->channel_map,
                                                    true),
                         pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
        }
    }
}
1277
1278 static int source_get_mute_cb(pa_source *s, bool *mute) {
1279     struct userdata *u = s->userdata;
1280
1281     pa_assert(u);
1282     pa_assert(u->mixer_path);
1283     pa_assert(u->mixer_handle);
1284
1285     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, mute) < 0)
1286         return -1;
1287
1288     return 0;
1289 }
1290
1291 static void source_set_mute_cb(pa_source *s) {
1292     struct userdata *u = s->userdata;
1293
1294     pa_assert(u);
1295     pa_assert(u->mixer_path);
1296     pa_assert(u->mixer_handle);
1297
1298     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1299 }
1300
/* Wire up (or disable) hardware volume and mute callbacks according to the
 * capabilities of the currently selected mixer path. */
static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        /* No hardware volume: clear all volume callbacks so the core
         * applies volume purely in software. */
        pa_source_set_write_volume_callback(u->source, NULL);
        pa_source_set_get_volume_callback(u->source, NULL);
        pa_source_set_set_volume_callback(u->source, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
        pa_source_set_set_volume_callback(u->source, source_set_volume_cb);

        /* Deferred volume (write from IO thread) requires a dB scale */
        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_source_set_write_volume_callback(u->source, NULL);

        if (u->mixer_path->has_dB) {
            pa_source_enable_decibel_volume(u->source, true);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Place the path's maximum (max_dB) at PA_VOLUME_NORM */
            u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->source->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
        } else {
            pa_source_enable_decibel_volume(u->source, false);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->source->base_volume = PA_VOLUME_NORM;
            u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        /* No hardware mute switch: fall back to software mute */
        pa_source_set_get_mute_callback(u->source, NULL);
        pa_source_set_set_mute_callback(u->source, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
        pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
1349
1350 static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
1351     struct userdata *u = s->userdata;
1352
1353     pa_assert(u);
1354     pa_assert(p);
1355     pa_assert(u->ucm_context);
1356
1357     return pa_alsa_ucm_set_port(u->ucm_context, p, false);
1358 }
1359
1360 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1361     struct userdata *u = s->userdata;
1362     pa_alsa_port_data *data;
1363
1364     pa_assert(u);
1365     pa_assert(p);
1366     pa_assert(u->mixer_handle);
1367
1368     data = PA_DEVICE_PORT_DATA(p);
1369
1370     pa_assert_se(u->mixer_path = data->path);
1371     pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1372
1373     mixer_volume_init(u);
1374
1375     if (s->set_mute)
1376         s->set_mute(s);
1377     if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1378         if (s->write_volume)
1379             s->write_volume(s);
1380     } else {
1381         if (s->set_volume)
1382             s->set_volume(s);
1383     }
1384
1385     return 0;
1386 }
1387
1388 static void source_update_requested_latency_cb(pa_source *s) {
1389     struct userdata *u = s->userdata;
1390     pa_assert(u);
1391     pa_assert(u->use_tsched); /* only when timer scheduling is used
1392                                * we can dynamically adjust the
1393                                * latency */
1394
1395     if (!u->pcm_handle)
1396         return;
1397
1398     update_sw_params(u);
1399 }
1400
1401 static int source_reconfigure_cb(pa_source *s, pa_sample_spec *spec, bool passthrough) {
1402     struct userdata *u = s->userdata;
1403     int i;
1404     bool supported = false;
1405
1406     /* FIXME: we only update rate for now */
1407
1408     pa_assert(u);
1409
1410     for (i = 0; u->rates[i]; i++) {
1411         if (u->rates[i] == spec->rate) {
1412             supported = true;
1413             break;
1414         }
1415     }
1416
1417     if (!supported) {
1418         pa_log_info("Source does not support sample rate of %d Hz", spec->rate);
1419         return -1;
1420     }
1421
1422     if (!PA_SOURCE_IS_OPENED(s->state)) {
1423         pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, spec->rate);
1424         u->source->sample_spec.rate = spec->rate;
1425         return 0;
1426     }
1427
1428     return -1;
1429 }
1430
/* The source's IO thread: reads audio from the PCM device, manages
 * timer-based scheduling sleeps and deferred volume, and processes poll
 * events until asked to shut down. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0, real_sleep;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Read some data and pass it to the sources */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (u->first) {
                /* First iteration after creation or unsuspend: kick off
                 * capture and resume the timing smoother */
                pa_log_info("Starting capture.");
                snd_pcm_start(u->pcm_handle);

                pa_smoother_resume(u->smoother, pa_rtclock_now(), true);

                u->first = false;
            }

            if (u->use_mmap)
                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
            else
                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);

            if (work_done < 0)
                goto fail;

/*             pa_log_debug("work_done = %i", work_done); */

            if (work_done)
                update_smoother(u);

            if (u->use_tsched) {
                pa_usec_t cusec;

                /* OK, the capture buffer is now empty, let's
                 * calculate when to wake up next */

/*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

/*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }
        }

        /* Pending deferred volume changes may require waking up earlier */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_source_volume_change_apply(u->source, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0) {
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
            /* Remember when we went to sleep so we can measure the real
             * sleep duration afterwards */
            real_sleep = pa_rtclock_now();
        }
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
            goto fail;

        if (rtpoll_sleep > 0) {
            real_sleep = pa_rtclock_now() - real_sleep;
#ifdef DEBUG_TIMING
            pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
                (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
                (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
#endif
            /* Warn if the scheduler woke us up much later than requested */
            if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
                pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
                    (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
                    (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
        }

        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
            pa_source_volume_change_apply(u->source, NULL);

        /* ret == 0 means pa_rtpoll_run() was asked to quit */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLIN) {
                /* Error condition on the PCM fd: try to recover and then
                 * restart capture on the next iteration */
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = true;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1575
1576 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1577     const char *n;
1578     char *t;
1579
1580     pa_assert(data);
1581     pa_assert(ma);
1582     pa_assert(device_name);
1583
1584     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1585         pa_source_new_data_set_name(data, n);
1586         data->namereg_fail = true;
1587         return;
1588     }
1589
1590     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1591         data->namereg_fail = true;
1592     else {
1593         n = device_id ? device_id : device_name;
1594         data->namereg_fail = false;
1595     }
1596
1597     if (mapping)
1598         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1599     else
1600         t = pa_sprintf_malloc("alsa_input.%s", n);
1601
1602     pa_source_new_data_set_name(data, t);
1603     pa_xfree(t);
1604 }
1605
/* Locate the ALSA mixer for our PCM and set up a mixer path (explicit
 * element) or a path set (from the mapping). On failure both the mixer
 * handle and path are torn down, leaving software volume/mute. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
    /* Without a mapping or an explicit element there is nothing to look up */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* An explicit mixer element was requested: build a path containing
         * just that element and probe it */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else if (!(u->mixer_path_set = mapping->input_path_set))
        goto fail;

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1642
/* Activate the chosen mixer path, hook up volume/mute support, and register
 * mixer event callbacks when hardware volume/mute is available.
 * Returns 0 on success (including the no-mixer case), -1 on error.
 * NOTE(review): ignore_dB is unused here — probing happens in find_mixer();
 * confirm whether the parameter can be dropped. */
static int setup_mixer(struct userdata *u, bool ignore_dB) {
    bool need_mixer_callback = false;

    pa_assert(u);

    /* No mixer was found: software volume/mute only */
    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = true;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        /* With deferred volume the mixer is polled from the IO thread via
         * the rtpoll, otherwise from the main loop via an fd list */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1718
1719 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1720
1721     struct userdata *u = NULL;
1722     const char *dev_id = NULL, *key, *mod_name;
1723     pa_sample_spec ss;
1724     char *thread_name = NULL;
1725     uint32_t alternate_sample_rate;
1726     pa_channel_map map;
1727     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1728     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1729     size_t frame_size;
1730     bool use_mmap = true, b, use_tsched = true, d, ignore_dB = false, namereg_fail = false, deferred_volume = false, fixed_latency_range = false;
1731     pa_source_new_data data;
1732     bool volume_is_set;
1733     bool mute_is_set;
1734     pa_alsa_profile_set *profile_set = NULL;
1735     void *state = NULL;
1736
1737     pa_assert(m);
1738     pa_assert(ma);
1739
1740     ss = m->core->default_sample_spec;
1741     map = m->core->default_channel_map;
1742
1743     /* Pick sample spec overrides from the mapping, if any */
1744     if (mapping) {
1745         if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
1746             ss.format = mapping->sample_spec.format;
1747         if (mapping->sample_spec.rate != 0)
1748             ss.rate = mapping->sample_spec.rate;
1749         if (mapping->sample_spec.channels != 0) {
1750             ss.channels = mapping->sample_spec.channels;
1751             if (pa_channel_map_valid(&mapping->channel_map))
1752                 pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
1753         }
1754     }
1755
1756     /* Override with modargs if provided */
1757     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1758         pa_log("Failed to parse sample specification and channel map");
1759         goto fail;
1760     }
1761
1762     alternate_sample_rate = m->core->alternate_sample_rate;
1763     if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1764         pa_log("Failed to parse alternate sample rate");
1765         goto fail;
1766     }
1767
1768     frame_size = pa_frame_size(&ss);
1769
1770     nfrags = m->core->default_n_fragments;
1771     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1772     if (frag_size <= 0)
1773         frag_size = (uint32_t) frame_size;
1774     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1775     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1776
1777     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1778         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1779         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1780         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1781         pa_log("Failed to parse buffer metrics");
1782         goto fail;
1783     }
1784
1785     buffer_size = nfrags * frag_size;
1786
1787     period_frames = frag_size/frame_size;
1788     buffer_frames = buffer_size/frame_size;
1789     tsched_frames = tsched_size/frame_size;
1790
1791     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1792         pa_log("Failed to parse mmap argument.");
1793         goto fail;
1794     }
1795
1796     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1797         pa_log("Failed to parse tsched argument.");
1798         goto fail;
1799     }
1800
1801     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1802         pa_log("Failed to parse ignore_dB argument.");
1803         goto fail;
1804     }
1805
1806     deferred_volume = m->core->deferred_volume;
1807     if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1808         pa_log("Failed to parse deferred_volume argument.");
1809         goto fail;
1810     }
1811
1812     if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
1813         pa_log("Failed to parse fixed_latency_range argument.");
1814         goto fail;
1815     }
1816
1817     use_tsched = pa_alsa_may_tsched(use_tsched);
1818
1819     u = pa_xnew0(struct userdata, 1);
1820     u->core = m->core;
1821     u->module = m;
1822     u->use_mmap = use_mmap;
1823     u->use_tsched = use_tsched;
1824     u->deferred_volume = deferred_volume;
1825     u->fixed_latency_range = fixed_latency_range;
1826     u->first = true;
1827     u->rtpoll = pa_rtpoll_new();
1828
1829     if (pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll) < 0) {
1830         pa_log("pa_thread_mq_init() failed.");
1831         goto fail;
1832     }
1833
1834     u->smoother = pa_smoother_new(
1835             SMOOTHER_ADJUST_USEC,
1836             SMOOTHER_WINDOW_USEC,
1837             true,
1838             true,
1839             5,
1840             pa_rtclock_now(),
1841             true);
1842     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1843
1844     /* use ucm */
1845     if (mapping && mapping->ucm_context.ucm)
1846         u->ucm_context = &mapping->ucm_context;
1847
1848     dev_id = pa_modargs_get_value(
1849             ma, "device_id",
1850             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1851
1852     u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
1853
1854     if (reserve_init(u, dev_id) < 0)
1855         goto fail;
1856
1857     if (reserve_monitor_init(u, dev_id) < 0)
1858         goto fail;
1859
1860     b = use_mmap;
1861     d = use_tsched;
1862
1863     /* Force ALSA to reread its configuration if module-alsa-card didn't
1864      * do it for us. This matters if our device was hot-plugged after ALSA
1865      * has already read its configuration - see
1866      * https://bugs.freedesktop.org/show_bug.cgi?id=54029
1867      */
1868
1869     if (!card)
1870         snd_config_update_free_global();
1871
1872     if (mapping) {
1873
1874         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1875             pa_log("device_id= not set");
1876             goto fail;
1877         }
1878
1879         if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
1880             if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
1881                 pa_log("Failed to enable ucm modifier %s", mod_name);
1882             else
1883                 pa_log_debug("Enabled ucm modifier %s", mod_name);
1884         }
1885
1886         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1887                       dev_id,
1888                       &u->device_name,
1889                       &ss, &map,
1890                       SND_PCM_STREAM_CAPTURE,
1891                       &period_frames, &buffer_frames, tsched_frames,
1892                       &b, &d, mapping)))
1893             goto fail;
1894
1895     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1896
1897         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1898             goto fail;
1899
1900         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1901                       dev_id,
1902                       &u->device_name,
1903                       &ss, &map,
1904                       SND_PCM_STREAM_CAPTURE,
1905                       &period_frames, &buffer_frames, tsched_frames,
1906                       &b, &d, profile_set, &mapping)))
1907             goto fail;
1908
1909     } else {
1910
1911         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1912                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1913                       &u->device_name,
1914                       &ss, &map,
1915                       SND_PCM_STREAM_CAPTURE,
1916                       &period_frames, &buffer_frames, tsched_frames,
1917                       &b, &d, false)))
1918             goto fail;
1919     }
1920
1921     pa_assert(u->device_name);
1922     pa_log_info("Successfully opened device %s.", u->device_name);
1923
1924     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1925         pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1926         goto fail;
1927     }
1928
1929     if (mapping)
1930         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1931
1932     if (use_mmap && !b) {
1933         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1934         u->use_mmap = use_mmap = false;
1935     }
1936
1937     if (use_tsched && (!b || !d)) {
1938         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1939         u->use_tsched = use_tsched = false;
1940     }
1941
1942     if (u->use_mmap)
1943         pa_log_info("Successfully enabled mmap() mode.");
1944
1945     if (u->use_tsched) {
1946         pa_log_info("Successfully enabled timer-based scheduling mode.");
1947         if (u->fixed_latency_range)
1948             pa_log_info("Disabling latency range changes on overrun");
1949     }
1950
1951     u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
1952     if (!u->rates) {
1953         pa_log_error("Failed to find any supported sample rates.");
1954         goto fail;
1955     }
1956
1957     /* ALSA might tweak the sample spec, so recalculate the frame size */
1958     frame_size = pa_frame_size(&ss);
1959
1960     if (!u->ucm_context)
1961         find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1962
1963     pa_source_new_data_init(&data);
1964     data.driver = driver;
1965     data.module = m;
1966     data.card = card;
1967     set_source_name(&data, ma, dev_id, u->device_name, mapping);
1968
1969     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1970      * variable instead of using &data.namereg_fail directly, because
1971      * data.namereg_fail is a bitfield and taking the address of a bitfield
1972      * variable is impossible. */
1973     namereg_fail = data.namereg_fail;
1974     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1975         pa_log("Failed to parse namereg_fail argument.");
1976         pa_source_new_data_done(&data);
1977         goto fail;
1978     }
1979     data.namereg_fail = namereg_fail;
1980
1981     pa_source_new_data_set_sample_spec(&data, &ss);
1982     pa_source_new_data_set_channel_map(&data, &map);
1983     pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
1984
1985     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1986     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1987     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1988     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1989     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1990
1991     if (mapping) {
1992         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1993         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1994
1995         while ((key = pa_proplist_iterate(mapping->proplist, &state)))
1996             pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
1997     }
1998
1999     pa_alsa_init_description(data.proplist, card);
2000
2001     if (u->control_device)
2002         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2003
2004     if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2005         pa_log("Invalid properties");
2006         pa_source_new_data_done(&data);
2007         goto fail;
2008     }
2009
2010     if (u->ucm_context)
2011         pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, false, card);
2012     else if (u->mixer_path_set)
2013         pa_alsa_add_ports(&data, u->mixer_path_set, card);
2014
2015     u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
2016     volume_is_set = data.volume_is_set;
2017     mute_is_set = data.muted_is_set;
2018     pa_source_new_data_done(&data);
2019
2020     if (!u->source) {
2021         pa_log("Failed to create source object");
2022         goto fail;
2023     }
2024
2025     if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2026                                  &u->source->thread_info.volume_change_safety_margin) < 0) {
2027         pa_log("Failed to parse deferred_volume_safety_margin parameter");
2028         goto fail;
2029     }
2030
2031     if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2032                                  &u->source->thread_info.volume_change_extra_delay) < 0) {
2033         pa_log("Failed to parse deferred_volume_extra_delay parameter");
2034         goto fail;
2035     }
2036
2037     u->source->parent.process_msg = source_process_msg;
2038     if (u->use_tsched)
2039         u->source->update_requested_latency = source_update_requested_latency_cb;
2040     u->source->set_state = source_set_state_cb;
2041     if (u->ucm_context)
2042         u->source->set_port = source_set_port_ucm_cb;
2043     else
2044         u->source->set_port = source_set_port_cb;
2045     if (u->source->alternate_sample_rate)
2046         u->source->reconfigure = source_reconfigure_cb;
2047     u->source->userdata = u;
2048
2049     pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
2050     pa_source_set_rtpoll(u->source, u->rtpoll);
2051
2052     u->frame_size = frame_size;
2053     u->frames_per_block = pa_mempool_block_size_max(m->core->mempool) / frame_size;
2054     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2055     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2056     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
2057
2058     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2059                 (double) u->hwbuf_size / (double) u->fragment_size,
2060                 (long unsigned) u->fragment_size,
2061                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2062                 (long unsigned) u->hwbuf_size,
2063                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2064
2065     if (u->use_tsched) {
2066         u->tsched_watermark_ref = tsched_watermark;
2067         reset_watermark(u, u->tsched_watermark_ref, &ss, false);
2068     }
2069     else
2070         pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
2071
2072     reserve_update(u);
2073
2074     if (update_sw_params(u) < 0)
2075         goto fail;
2076
2077     if (u->ucm_context) {
2078         if (u->source->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, false) < 0)
2079             goto fail;
2080     } else if (setup_mixer(u, ignore_dB) < 0)
2081         goto fail;
2082
2083     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2084
2085     thread_name = pa_sprintf_malloc("alsa-source-%s", pa_strnull(pa_proplist_gets(u->source->proplist, "alsa.id")));
2086     if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
2087         pa_log("Failed to create thread.");
2088         goto fail;
2089     }
2090     pa_xfree(thread_name);
2091     thread_name = NULL;
2092
2093     /* Get initial mixer settings */
2094     if (volume_is_set) {
2095         if (u->source->set_volume)
2096             u->source->set_volume(u->source);
2097     } else {
2098         if (u->source->get_volume)
2099             u->source->get_volume(u->source);
2100     }
2101
2102     if (mute_is_set) {
2103         if (u->source->set_mute)
2104             u->source->set_mute(u->source);
2105     } else {
2106         if (u->source->get_mute) {
2107             bool mute;
2108
2109             if (u->source->get_mute(u->source, &mute) >= 0)
2110                 pa_source_set_mute(u->source, mute, false);
2111         }
2112     }
2113
2114     if ((volume_is_set || mute_is_set) && u->source->write_volume)
2115         u->source->write_volume(u->source);
2116
2117     pa_source_put(u->source);
2118
2119     if (profile_set)
2120         pa_alsa_profile_set_free(profile_set);
2121
2122     return u->source;
2123
2124 fail:
2125     pa_xfree(thread_name);
2126
2127     if (u)
2128         userdata_free(u);
2129
2130     if (profile_set)
2131         pa_alsa_profile_set_free(profile_set);
2132
2133     return NULL;
2134 }
2135
2136 static void userdata_free(struct userdata *u) {
2137     pa_assert(u);
2138
2139     if (u->source)
2140         pa_source_unlink(u->source);
2141
2142     if (u->thread) {
2143         pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2144         pa_thread_free(u->thread);
2145     }
2146
2147     pa_thread_mq_done(&u->thread_mq);
2148
2149     if (u->source)
2150         pa_source_unref(u->source);
2151
2152     if (u->mixer_pd)
2153         pa_alsa_mixer_pdata_free(u->mixer_pd);
2154
2155     if (u->alsa_rtpoll_item)
2156         pa_rtpoll_item_free(u->alsa_rtpoll_item);
2157
2158     if (u->rtpoll)
2159         pa_rtpoll_free(u->rtpoll);
2160
2161     if (u->pcm_handle) {
2162         snd_pcm_drop(u->pcm_handle);
2163         snd_pcm_close(u->pcm_handle);
2164     }
2165
2166     if (u->mixer_fdl)
2167         pa_alsa_fdlist_free(u->mixer_fdl);
2168
2169     if (u->mixer_path && !u->mixer_path_set)
2170         pa_alsa_path_free(u->mixer_path);
2171
2172     if (u->mixer_handle)
2173         snd_mixer_close(u->mixer_handle);
2174
2175     if (u->smoother)
2176         pa_smoother_free(u->smoother);
2177
2178     if (u->rates)
2179         pa_xfree(u->rates);
2180
2181     reserve_done(u);
2182     monitor_done(u);
2183
2184     pa_xfree(u->device_name);
2185     pa_xfree(u->control_device);
2186     pa_xfree(u->paths_dir);
2187     pa_xfree(u);
2188 }
2189
2190 void pa_alsa_source_free(pa_source *s) {
2191     struct userdata *u;
2192
2193     pa_source_assert_ref(s);
2194     pa_assert_se(u = s->userdata);
2195
2196     userdata_free(u);
2197 }