69da88dab5ae14b98b8a0fc9d27ce27d363ff10a
[platform/upstream/pulseaudio.git] / src / modules / alsa / alsa-source.c
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
19 ***/
20
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24
25 #include <signal.h>
26 #include <stdio.h>
27
28 #include <asoundlib.h>
29
30 #include <pulse/rtclock.h>
31 #include <pulse/timeval.h>
32 #include <pulse/volume.h>
33 #include <pulse/xmalloc.h>
34
35 #include <pulsecore/core.h>
36 #include <pulsecore/i18n.h>
37 #include <pulsecore/module.h>
38 #include <pulsecore/memchunk.h>
39 #include <pulsecore/sink.h>
40 #include <pulsecore/modargs.h>
41 #include <pulsecore/core-rtclock.h>
42 #include <pulsecore/core-util.h>
43 #include <pulsecore/sample-util.h>
44 #include <pulsecore/log.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/thread.h>
47 #include <pulsecore/thread-mq.h>
48 #include <pulsecore/rtpoll.h>
49 #include <pulsecore/time-smoother.h>
50
51 #include <modules/reserve-wrap.h>
52
53 #include "alsa-util.h"
54 #include "alsa-source.h"
55
56 /* #define DEBUG_TIMING */
57
58 #define DEFAULT_DEVICE "default"
59
60 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
61 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
62
63 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
64 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
65 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
66 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
67 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
68 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
69
70 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
71 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
72
73 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
74 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
75
76 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
77 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
78
79 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
80
/* Per-instance state of an ALSA capture source, used by both the main
 * thread and the dedicated IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_source *source;

    /* IO thread plumbing */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;       /* capture PCM; NULL while suspended */

    /* Mixer / hardware volume handling */
    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    unsigned int *rates;

    /* All of the following sizes are in bytes */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold;

    snd_pcm_uframes_t frames_per_block;

    pa_usec_t watermark_dec_not_before; /* earliest time a watermark decrease is allowed; 0 = not armed */
    pa_usec_t min_latency_ref;          /* remembered min latency, reused from IO context (see reset_watermark()) */
    pa_usec_t tsched_watermark_usec;    /* tsched_watermark converted to time */

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    bool first;                  /* set after recovery/start; first read cycle pending */

    pa_rtpoll_item *alsa_rtpoll_item;

    /* Time smoother mapping the device byte position onto a steady clock */
    pa_smoother *smoother;
    uint64_t read_count;         /* total bytes read from the device so far */
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    /* Session-wide device reservation */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};
145
146 static void userdata_free(struct userdata *u);
147
148 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
149     pa_assert(r);
150     pa_assert(u);
151
152     pa_log_debug("Suspending source %s, because another application requested us to release the device.", u->source->name);
153
154     if (pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION) < 0)
155         return PA_HOOK_CANCEL;
156
157     return PA_HOOK_OK;
158 }
159
160 static void reserve_done(struct userdata *u) {
161     pa_assert(u);
162
163     if (u->reserve_slot) {
164         pa_hook_slot_free(u->reserve_slot);
165         u->reserve_slot = NULL;
166     }
167
168     if (u->reserve) {
169         pa_reserve_wrapper_unref(u->reserve);
170         u->reserve = NULL;
171     }
172 }
173
174 static void reserve_update(struct userdata *u) {
175     const char *description;
176     pa_assert(u);
177
178     if (!u->source || !u->reserve)
179         return;
180
181     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
182         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
183 }
184
185 static int reserve_init(struct userdata *u, const char *dname) {
186     char *rname;
187
188     pa_assert(u);
189     pa_assert(dname);
190
191     if (u->reserve)
192         return 0;
193
194     if (pa_in_system_mode())
195         return 0;
196
197     if (!(rname = pa_alsa_get_reserve_name(dname)))
198         return 0;
199
200     /* We are resuming, try to lock the device */
201     u->reserve = pa_reserve_wrapper_get(u->core, rname);
202     pa_xfree(rname);
203
204     if (!(u->reserve))
205         return -1;
206
207     reserve_update(u);
208
209     pa_assert(!u->reserve_slot);
210     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
211
212     return 0;
213 }
214
215 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
216     pa_assert(w);
217     pa_assert(u);
218
219     if (PA_PTR_TO_UINT(busy) && !u->reserve) {
220         pa_log_debug("Suspending source %s, because another application is blocking the access to the device.", u->source->name);
221         pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION);
222     } else {
223         pa_log_debug("Resuming source %s, because other applications aren't blocking access to the device any more.", u->source->name);
224         pa_source_suspend(u->source, false, PA_SUSPEND_APPLICATION);
225     }
226
227     return PA_HOOK_OK;
228 }
229
230 static void monitor_done(struct userdata *u) {
231     pa_assert(u);
232
233     if (u->monitor_slot) {
234         pa_hook_slot_free(u->monitor_slot);
235         u->monitor_slot = NULL;
236     }
237
238     if (u->monitor) {
239         pa_reserve_monitor_wrapper_unref(u->monitor);
240         u->monitor = NULL;
241     }
242 }
243
244 static int reserve_monitor_init(struct userdata *u, const char *dname) {
245     char *rname;
246
247     pa_assert(u);
248     pa_assert(dname);
249
250     if (pa_in_system_mode())
251         return 0;
252
253     if (!(rname = pa_alsa_get_reserve_name(dname)))
254         return 0;
255
256     /* We are resuming, try to lock the device */
257     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
258     pa_xfree(rname);
259
260     if (!(u->monitor))
261         return -1;
262
263     pa_assert(!u->monitor_slot);
264     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
265
266     return 0;
267 }
268
/* Recompute the minimum sleep and wakeup margins (in bytes) used by the
 * timer-based scheduler, clamping each between one frame and half of the
 * usable part of the hardware buffer. */
static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* Bytes of the hardware buffer we are actually allowed to use */
    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}
284
/* Clamp the tsched watermark into [min_wakeup, max_use - min_sleep]
 * and refresh its time-domain mirror tsched_watermark_usec.
 * (min_sleep/min_wakeup must have been fixed up beforehand, see
 * fix_min_sleep_wakeup().) */
static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;

    u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
}
300
/* React to an (imminent) overrun: raise the wakeup watermark, and if it
 * is already maxed out, raise the source's minimal latency instead
 * (unless a fixed latency range was configured). */
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    /* Double it, but never grow by more than the configured step */
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
     raise the latency unless doing so was disabled in
     configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->source->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
    }

    /* If we get here, neither the watermark nor the latency can be
     * raised any further — overruns are now unavoidable. */
}
338
/* Lower the wakeup watermark again after a sustained period without
 * overruns, reducing scheduling pressure. Rate-limited via
 * watermark_dec_not_before. */
static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    /* pa_usec_t is unsigned, so this effectively tests "timer not armed
     * yet": arm it and try again on a later timeout */
    if (u->watermark_dec_not_before <= 0)
        goto restart;

    /* Verification interval has not elapsed yet */
    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    /* Halve the watermark, but never shrink by more than the configured step */
    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);

    /* We don't change the latency range*/

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
372
/* For timer-based scheduling, split the latency budget into the time we
 * may sleep (*sleep_usec) and the processing margin reserved after the
 * wakeup (*process_usec, i.e. the watermark). */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
    pa_usec_t wm, usec;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_source_get_requested_latency_within_thread(u->source);

    /* No specific latency requested: budget the full hardware buffer time */
    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);

    wm = u->tsched_watermark_usec;

    /* If the watermark would exceed the whole budget, fall back to half of it */
    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
402
/* Attempt to recover the PCM from an error reported by the ALSA call
 * named 'call' (typically -EPIPE on overrun or -ESTRPIPE after a system
 * suspend). Returns 0 if the stream was recovered, -1 otherwise. */
static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    /* -EAGAIN is handled by the callers, never passed in here */
    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer overrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    /* A recovered stream needs a fresh start */
    u->first = true;
    return 0;
}
426
/* Given how many bytes are ready to be read (n_bytes), return how much
 * free space remains before the hardware buffer overruns, and adapt the
 * tsched watermark: raise it on (near-)overrun, lower it when we have
 * plenty of headroom and were woken by a timeout. */
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, bool on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    bool overrun = false;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = true;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        bool reset_not_before = true;

        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = false;

            /* We decrease the watermark only if have actually
             * been woken up by a timeout. If something else woke
             * us up it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        /* Headroom dropped below the decrease threshold: restart the
         * "quiet period" timer used by decrease_watermark() */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}
479
/* Capture loop for mmap access mode: moves all currently available data
 * from the ALSA ring buffer into the source via zero-copy memblocks.
 * 'polled' means poll() reported POLLIN, 'on_timeout' means we were
 * woken by the tsched timer. Returns 1 if any data was transferred,
 * 0 if not, negative on unrecoverable error. With tsched, *sleep_usec
 * is set to how long we may sleep before the next wakeup. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    bool work_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        /* How many frames are ready in the hardware buffer? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = false;

        /* If we woke up early (not via poll()) and there is still ample
         * headroom before an overrun, postpone the read */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN fired but nothing is available — a driver bug;
             * complain once per process */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read.\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }

        /* Bound the number of outer iterations so we cannot starve the
         * rest of the IO thread */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = false;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
/*             pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                /* -EAGAIN after at least one successful chunk just means
                 * we consumed what avail announced; stop cleanly */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            frames = PA_MIN(frames, u->frames_per_block);

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = false;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'ed region in a read-only fixed memblock so
             * we can post it downstream without copying */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, true);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = true;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* With tsched, sleep until the buffer has filled up to the watermark
     * again (left_to_record minus the processing margin) */
    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
639
/* Capture loop for non-mmap access mode: reads available data with
 * snd_pcm_readi() into freshly allocated memblocks and posts them to
 * the source. Same contract as mmap_read(): returns 1 if any data was
 * transferred, 0 if not, negative on unrecoverable error; with tsched,
 * *sleep_usec is set to the time we may sleep before the next wakeup. */
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    int work_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        /* How many frames are ready in the hardware buffer? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = false;

        /* If we woke up early (not via poll()) and there is still ample
         * headroom before an overrun, postpone the read */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN fired but nothing is available — a driver bug;
             * complain once per process */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Bound the number of outer iterations so we cannot starve the
         * rest of the IO thread */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = false;

        for (;;) {
            void *p;
            snd_pcm_sframes_t frames;
            pa_memchunk chunk;

            /* Allocate a default-sized memblock as the read target */
            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            /* Don't ask for more than what avail announced */
            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

/*             pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                /* -EAGAIN after at least one successful chunk just means
                 * we consumed what avail announced; stop cleanly */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);
                break;
            }

            pa_assert(frames > 0);
            after_avail = false;

            chunk.index = 0;
            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            work_done = true;

            u->read_count += frames * u->frame_size;

/*             pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* With tsched, sleep until the buffer has filled up to the watermark
     * again (left_to_record minus the processing margin) */
    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
769
/* Feed the current device position (bytes read + frames still in the hw
 * buffer) into the time smoother, so source_get_latency() can
 * interpolate a steady capture clock. Rate-limited by
 * smoother_interval, which backs off exponentially. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;
    snd_htimestamp_t htstamp = { 0, 0 };

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->source->sample_spec, true)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    snd_pcm_status_get_htstamp(status, &htstamp);
    now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Device position in bytes: everything read so far plus what is
     * still sitting in the hardware buffer */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
811
812 static int64_t source_get_latency(struct userdata *u) {
813     int64_t delay;
814     pa_usec_t now1, now2;
815
816     pa_assert(u);
817
818     now1 = pa_rtclock_now();
819     now2 = pa_smoother_get(u->smoother, now1);
820
821     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
822
823     return delay;
824 }
825
826 static int build_pollfd(struct userdata *u) {
827     pa_assert(u);
828     pa_assert(u->pcm_handle);
829
830     if (u->alsa_rtpoll_item)
831         pa_rtpoll_item_free(u->alsa_rtpoll_item);
832
833     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
834         return -1;
835
836     return 0;
837 }
838
/* Called from IO context */
/* Suspend the source: pause the time smoother, close the PCM device
 * (it is reopened by unsuspend()) and drop the poll item. */
static void suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");
}
857
/* Called from IO context */
/* Reconfigure the ALSA software parameters (avail_min) and hwbuf_unused
 * to match the currently requested latency. Returns 0 on success or a
 * negative ALSA error code. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* With tsched we only want to be woken after the sleep interval */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}
910
/* Called from IO Context on unsuspend or from main thread when creating source */
/* (Re)initialize all watermark-related byte values from the given
 * tsched_watermark (expressed in sample spec *ss) and publish the
 * resulting latency range on the source. */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            bool in_thread) {
    /* Convert from the caller's sample spec into the source's */
    u->tsched_watermark = pa_convert_size(tsched_watermark, ss, &u->source->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_source_set_latency_range_within_thread(u->source,
                                                  u->min_latency_ref,
                                                  pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_source_set_latency_within_thead,
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->source->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
}
943
/* Called from IO context. Re-opens the PCM device after a suspend and tries to
 * restore the exact configuration that was in use before. Returns 0 on success
 * or -PA_ERR_IO if the device can't be resumed with identical settings. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    bool b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Open non-blocking and with all automatic ALSA plugin conversions
     * disabled, so the hardware parameters we negotiate are the real ones. */
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Request the same parameters that were in effect before the suspend. */
    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, true)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* pa_alsa_set_hw_params() may have adjusted what we asked for; resuming
     * is only valid if we got back exactly the original configuration. */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Restart latency interpolation from scratch. */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    /* Makes the IO thread call snd_pcm_start() on the next iteration. */
    u->first = true;

    /* reset the watermark to the value defined when source was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, true);

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    /* Undo the partial open; the source remains suspended. */
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1025
1026 /* Called from IO context */
1027 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1028     struct userdata *u = PA_SOURCE(o)->userdata;
1029
1030     switch (code) {
1031
1032         case PA_SOURCE_MESSAGE_GET_LATENCY: {
1033             int64_t r = 0;
1034
1035             if (u->pcm_handle)
1036                 r = source_get_latency(u);
1037
1038             *((int64_t*) data) = r;
1039
1040             return 0;
1041         }
1042     }
1043
1044     return pa_source_process_msg(o, code, data, offset, chunk);
1045 }
1046
1047 /* Called from main context */
1048 static int source_set_state_in_main_thread_cb(pa_source *s, pa_source_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
1049     pa_source_state_t old_state;
1050     struct userdata *u;
1051
1052     pa_source_assert_ref(s);
1053     pa_assert_se(u = s->userdata);
1054
1055     old_state = pa_source_get_state(u->source);
1056
1057     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1058         reserve_done(u);
1059     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1060         if (reserve_init(u, u->device_name) < 0)
1061             return -PA_ERR_BUSY;
1062
1063     return 0;
1064 }
1065
1066 /* Called from the IO thread. */
1067 static int source_set_state_in_io_thread_cb(pa_source *s, pa_source_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
1068     struct userdata *u;
1069
1070     pa_assert(s);
1071     pa_assert_se(u = s->userdata);
1072
1073     /* It may be that only the suspend cause is changing, in which case there's
1074      * nothing to do. */
1075     if (new_state == s->thread_info.state)
1076         return 0;
1077
1078     switch (new_state) {
1079
1080         case PA_SOURCE_SUSPENDED: {
1081             pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1082
1083             suspend(u);
1084
1085             break;
1086         }
1087
1088         case PA_SOURCE_IDLE:
1089         case PA_SOURCE_RUNNING: {
1090             int r;
1091
1092             if (u->source->thread_info.state == PA_SOURCE_INIT) {
1093                 if (build_pollfd(u) < 0)
1094                     /* FIXME: This will cause an assertion failure, because
1095                      * with the current design pa_source_put() is not allowed
1096                      * to fail and pa_source_put() has no fallback code that
1097                      * would start the source suspended if opening the device
1098                      * fails. */
1099                     return -PA_ERR_IO;
1100             }
1101
1102             if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1103                 if ((r = unsuspend(u)) < 0)
1104                     return r;
1105             }
1106
1107             break;
1108         }
1109
1110         case PA_SOURCE_UNLINKED:
1111         case PA_SOURCE_INIT:
1112         case PA_SOURCE_INVALID_STATE:
1113             ;
1114     }
1115
1116     return 0;
1117 }
1118
1119 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1120     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1121
1122     pa_assert(u);
1123     pa_assert(u->mixer_handle);
1124
1125     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1126         return 0;
1127
1128     if (!PA_SOURCE_IS_LINKED(u->source->state))
1129         return 0;
1130
1131     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1132         pa_source_set_mixer_dirty(u->source, true);
1133         return 0;
1134     }
1135
1136     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1137         pa_source_get_volume(u->source, true);
1138         pa_source_get_mute(u->source, true);
1139     }
1140
1141     return 0;
1142 }
1143
1144 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1145     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1146
1147     pa_assert(u);
1148     pa_assert(u->mixer_handle);
1149
1150     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1151         return 0;
1152
1153     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1154         pa_source_set_mixer_dirty(u->source, true);
1155         return 0;
1156     }
1157
1158     if (mask & SND_CTL_EVENT_MASK_VALUE)
1159         pa_source_update_volume_and_mute(u->source);
1160
1161     return 0;
1162 }
1163
1164 static void source_get_volume_cb(pa_source *s) {
1165     struct userdata *u = s->userdata;
1166     pa_cvolume r;
1167     char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1168
1169     pa_assert(u);
1170     pa_assert(u->mixer_path);
1171     pa_assert(u->mixer_handle);
1172
1173     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1174         return;
1175
1176     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1177     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1178
1179     pa_log_debug("Read hardware volume: %s",
1180                  pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));
1181
1182     if (pa_cvolume_equal(&u->hardware_volume, &r))
1183         return;
1184
1185     s->real_volume = u->hardware_volume = r;
1186
1187     /* Hmm, so the hardware volume changed, let's reset our software volume */
1188     if (u->mixer_path->has_dB)
1189         pa_source_set_soft_volume(s, NULL);
1190 }
1191
/* Write the requested real volume to the hardware mixer. With a dB-capable
 * mixer the remainder that the hardware can't express is applied as software
 * volume; without dB support the real volume is adjusted to what the hardware
 * actually accepted. */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
    bool deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    /* 'r' now holds what the hardware actually applied. */
    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        bool accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
        pa_log_debug("Got hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1248
/* Deferred-volume write callback (IO thread): flush the pending hardware
 * volume (thread_info.current_hw_volume) to the mixer and log if the mixer
 * couldn't represent it accurately. */
static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        bool accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Compare what was actually applied against what was requested; the
         * ratio should be ~PA_VOLUME_NORM on every channel if accurate. */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint_verbose(volume_buf[0],
                                                    sizeof(volume_buf[0]),
                                                    &s->thread_info.current_hw_volume,
                                                    &s->channel_map,
                                                    true),
                         pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
        }
    }
}
1288
1289 static int source_get_mute_cb(pa_source *s, bool *mute) {
1290     struct userdata *u = s->userdata;
1291
1292     pa_assert(u);
1293     pa_assert(u->mixer_path);
1294     pa_assert(u->mixer_handle);
1295
1296     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, mute) < 0)
1297         return -1;
1298
1299     return 0;
1300 }
1301
1302 static void source_set_mute_cb(pa_source *s) {
1303     struct userdata *u = s->userdata;
1304
1305     pa_assert(u);
1306     pa_assert(u->mixer_path);
1307     pa_assert(u->mixer_handle);
1308
1309     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1310 }
1311
/* Install (or clear) the volume and mute callbacks on the source depending on
 * what the currently selected mixer path supports, and configure the base
 * volume / number of volume steps accordingly. */
static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        /* No hardware volume on this path: clear all volume callbacks so the
         * core falls back to pure software volume. */
        pa_source_set_write_volume_callback(u->source, NULL);
        pa_source_set_get_volume_callback(u->source, NULL);
        pa_source_set_set_volume_callback(u->source, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
        pa_source_set_set_volume_callback(u->source, source_set_volume_cb);

        /* Deferred volume (writing from the IO thread) needs a dB scale. */
        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_source_set_write_volume_callback(u->source, NULL);

        if (u->mixer_path->has_dB) {
            pa_source_enable_decibel_volume(u->source, true);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Pick the base volume so that the path's max_dB maps to 0dB. */
            u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->source->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
        } else {
            /* Without dB information we can only expose the raw step range. */
            pa_source_enable_decibel_volume(u->source, false);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->source->base_volume = PA_VOLUME_NORM;
            u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_source_set_get_mute_callback(u->source, NULL);
        pa_source_set_set_mute_callback(u->source, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
        pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
1360
1361 static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
1362     struct userdata *u = s->userdata;
1363
1364     pa_assert(u);
1365     pa_assert(p);
1366     pa_assert(u->ucm_context);
1367
1368     return pa_alsa_ucm_set_port(u->ucm_context, p, false);
1369 }
1370
1371 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1372     struct userdata *u = s->userdata;
1373     pa_alsa_port_data *data;
1374
1375     pa_assert(u);
1376     pa_assert(p);
1377     pa_assert(u->mixer_handle);
1378
1379     data = PA_DEVICE_PORT_DATA(p);
1380
1381     pa_assert_se(u->mixer_path = data->path);
1382     pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1383
1384     mixer_volume_init(u);
1385
1386     if (s->set_mute)
1387         s->set_mute(s);
1388     if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1389         if (s->write_volume)
1390             s->write_volume(s);
1391     } else {
1392         if (s->set_volume)
1393             s->set_volume(s);
1394     }
1395
1396     return 0;
1397 }
1398
1399 static void source_update_requested_latency_cb(pa_source *s) {
1400     struct userdata *u = s->userdata;
1401     pa_assert(u);
1402     pa_assert(u->use_tsched); /* only when timer scheduling is used
1403                                * we can dynamically adjust the
1404                                * latency */
1405
1406     if (!u->pcm_handle)
1407         return;
1408
1409     update_sw_params(u);
1410 }
1411
1412 static int source_reconfigure_cb(pa_source *s, pa_sample_spec *spec, bool passthrough) {
1413     struct userdata *u = s->userdata;
1414     int i;
1415     bool supported = false;
1416
1417     /* FIXME: we only update rate for now */
1418
1419     pa_assert(u);
1420
1421     for (i = 0; u->rates[i]; i++) {
1422         if (u->rates[i] == spec->rate) {
1423             supported = true;
1424             break;
1425         }
1426     }
1427
1428     if (!supported) {
1429         pa_log_info("Source does not support sample rate of %d Hz", spec->rate);
1430         return -1;
1431     }
1432
1433     if (!PA_SOURCE_IS_OPENED(s->state)) {
1434         pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, spec->rate);
1435         u->source->sample_spec.rate = spec->rate;
1436         return 0;
1437     }
1438
1439     return -1;
1440 }
1441
/* The IO thread main loop: reads audio data from the device, manages the
 * timer-based scheduling wakeups, applies deferred volume changes and
 * dispatches ALSA poll events. Runs until a PA_MESSAGE_SHUTDOWN arrives. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0, real_sleep;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Read some data and pass it to the sources */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* First iteration after open/resume: actually start capture. */
            if (u->first) {
                pa_log_info("Starting capture.");
                snd_pcm_start(u->pcm_handle);

                pa_smoother_resume(u->smoother, pa_rtclock_now(), true);

                u->first = false;
            }

            if (u->use_mmap)
                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
            else
                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);

            if (work_done < 0)
                goto fail;

/*             pa_log_debug("work_done = %i", work_done); */

            if (work_done)
                update_smoother(u);

            if (u->use_tsched) {
                pa_usec_t cusec;

                /* OK, the capture buffer is now empty, let's
                 * calculate when to wake up next */

/*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

/*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }
        }

        /* With deferred volume, a pending volume write may require an
         * earlier wakeup than the capture schedule. */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_source_volume_change_apply(u->source, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0) {
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
            /* Remember when we went to sleep so we can measure the actual
             * sleep duration below. */
            real_sleep = pa_rtclock_now();
        }
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
            goto fail;

        if (rtpoll_sleep > 0) {
            real_sleep = pa_rtclock_now() - real_sleep;
#ifdef DEBUG_TIMING
            pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
                (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
                (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
#endif
            /* Warn if the scheduler woke us up much later than requested. */
            if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
                pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
                    (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
                    (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
        }

        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
            pa_source_volume_change_apply(u->source, NULL);

        /* pa_rtpoll_run() returning 0 means an orderly quit request. */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLIN indicates an error condition (e.g.
             * xrun); try to recover and restart the capture. */
            if (revents & ~POLLIN) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = true;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1586
1587 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1588     const char *n;
1589     char *t;
1590
1591     pa_assert(data);
1592     pa_assert(ma);
1593     pa_assert(device_name);
1594
1595     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1596         pa_source_new_data_set_name(data, n);
1597         data->namereg_fail = true;
1598         return;
1599     }
1600
1601     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1602         data->namereg_fail = true;
1603     else {
1604         n = device_id ? device_id : device_name;
1605         data->namereg_fail = false;
1606     }
1607
1608     if (mapping)
1609         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1610     else
1611         t = pa_sprintf_malloc("alsa_input.%s", n);
1612
1613     pa_source_new_data_set_name(data, t);
1614     pa_xfree(t);
1615 }
1616
/* Locate and probe a mixer for the PCM device. Either synthesizes a path for
 * an explicitly given mixer 'element', or adopts the mapping's pre-probed
 * input path set. On failure everything is torn down again and the source
 * simply runs without hardware volume/mute control. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
    /* Nothing to base a mixer search on. */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* An explicit element was requested: build a single synthetic path
         * around it and verify it actually works. */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, NULL, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else if (!(u->mixer_path_set = mapping->input_path_set))
        goto fail;

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1653
/* Activate the mixer path belonging to the active port (or the only available
 * path), install the volume/mute callbacks and, if any path offers hardware
 * volume or mute, hook up mixer event monitoring. Returns 0 on success (also
 * when there simply is no mixer), negative on setup failure. */
static int setup_mixer(struct userdata *u, bool ignore_dB) {
    bool need_mixer_callback = false;

    pa_assert(u);

    /* No mixer was found earlier -- nothing to set up. */
    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = true;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        /* Deferred volume monitors the mixer from the IO thread (rtpoll);
         * otherwise events are handled on the main loop via an fd list. */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1729
1730 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1731
1732     struct userdata *u = NULL;
1733     const char *dev_id = NULL, *key, *mod_name;
1734     pa_sample_spec ss;
1735     char *thread_name = NULL;
1736     uint32_t alternate_sample_rate;
1737     pa_channel_map map;
1738     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1739     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1740     size_t frame_size;
1741     bool use_mmap = true, b, use_tsched = true, d, ignore_dB = false, namereg_fail = false, deferred_volume = false, fixed_latency_range = false;
1742     pa_source_new_data data;
1743     bool volume_is_set;
1744     bool mute_is_set;
1745     pa_alsa_profile_set *profile_set = NULL;
1746     void *state = NULL;
1747
1748     pa_assert(m);
1749     pa_assert(ma);
1750
1751     ss = m->core->default_sample_spec;
1752     map = m->core->default_channel_map;
1753
1754     /* Pick sample spec overrides from the mapping, if any */
1755     if (mapping) {
1756         if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
1757             ss.format = mapping->sample_spec.format;
1758         if (mapping->sample_spec.rate != 0)
1759             ss.rate = mapping->sample_spec.rate;
1760         if (mapping->sample_spec.channels != 0) {
1761             ss.channels = mapping->sample_spec.channels;
1762             if (pa_channel_map_valid(&mapping->channel_map))
1763                 pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
1764         }
1765     }
1766
1767     /* Override with modargs if provided */
1768     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1769         pa_log("Failed to parse sample specification and channel map");
1770         goto fail;
1771     }
1772
1773     alternate_sample_rate = m->core->alternate_sample_rate;
1774     if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1775         pa_log("Failed to parse alternate sample rate");
1776         goto fail;
1777     }
1778
1779     frame_size = pa_frame_size(&ss);
1780
1781     nfrags = m->core->default_n_fragments;
1782     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1783     if (frag_size <= 0)
1784         frag_size = (uint32_t) frame_size;
1785     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1786     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1787
1788     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1789         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1790         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1791         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1792         pa_log("Failed to parse buffer metrics");
1793         goto fail;
1794     }
1795
1796     buffer_size = nfrags * frag_size;
1797
1798     period_frames = frag_size/frame_size;
1799     buffer_frames = buffer_size/frame_size;
1800     tsched_frames = tsched_size/frame_size;
1801
1802     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1803         pa_log("Failed to parse mmap argument.");
1804         goto fail;
1805     }
1806
1807     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1808         pa_log("Failed to parse tsched argument.");
1809         goto fail;
1810     }
1811
1812     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1813         pa_log("Failed to parse ignore_dB argument.");
1814         goto fail;
1815     }
1816
1817     deferred_volume = m->core->deferred_volume;
1818     if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1819         pa_log("Failed to parse deferred_volume argument.");
1820         goto fail;
1821     }
1822
1823     if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
1824         pa_log("Failed to parse fixed_latency_range argument.");
1825         goto fail;
1826     }
1827
1828     use_tsched = pa_alsa_may_tsched(use_tsched);
1829
1830     u = pa_xnew0(struct userdata, 1);
1831     u->core = m->core;
1832     u->module = m;
1833     u->use_mmap = use_mmap;
1834     u->use_tsched = use_tsched;
1835     u->deferred_volume = deferred_volume;
1836     u->fixed_latency_range = fixed_latency_range;
1837     u->first = true;
1838     u->rtpoll = pa_rtpoll_new();
1839
1840     if (pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll) < 0) {
1841         pa_log("pa_thread_mq_init() failed.");
1842         goto fail;
1843     }
1844
1845     u->smoother = pa_smoother_new(
1846             SMOOTHER_ADJUST_USEC,
1847             SMOOTHER_WINDOW_USEC,
1848             true,
1849             true,
1850             5,
1851             pa_rtclock_now(),
1852             true);
1853     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1854
1855     /* use ucm */
1856     if (mapping && mapping->ucm_context.ucm)
1857         u->ucm_context = &mapping->ucm_context;
1858
1859     dev_id = pa_modargs_get_value(
1860             ma, "device_id",
1861             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1862
1863     u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
1864
1865     if (reserve_init(u, dev_id) < 0)
1866         goto fail;
1867
1868     if (reserve_monitor_init(u, dev_id) < 0)
1869         goto fail;
1870
1871     b = use_mmap;
1872     d = use_tsched;
1873
1874     /* Force ALSA to reread its configuration if module-alsa-card didn't
1875      * do it for us. This matters if our device was hot-plugged after ALSA
1876      * has already read its configuration - see
1877      * https://bugs.freedesktop.org/show_bug.cgi?id=54029
1878      */
1879
1880     if (!card)
1881         snd_config_update_free_global();
1882
1883     if (mapping) {
1884
1885         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1886             pa_log("device_id= not set");
1887             goto fail;
1888         }
1889
1890         if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
1891             if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
1892                 pa_log("Failed to enable ucm modifier %s", mod_name);
1893             else
1894                 pa_log_debug("Enabled ucm modifier %s", mod_name);
1895         }
1896
1897         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1898                       dev_id,
1899                       &u->device_name,
1900                       &ss, &map,
1901                       SND_PCM_STREAM_CAPTURE,
1902                       &period_frames, &buffer_frames, tsched_frames,
1903                       &b, &d, mapping)))
1904             goto fail;
1905
1906     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1907
1908         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1909             goto fail;
1910
1911         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1912                       dev_id,
1913                       &u->device_name,
1914                       &ss, &map,
1915                       SND_PCM_STREAM_CAPTURE,
1916                       &period_frames, &buffer_frames, tsched_frames,
1917                       &b, &d, profile_set, &mapping)))
1918             goto fail;
1919
1920     } else {
1921
1922         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1923                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1924                       &u->device_name,
1925                       &ss, &map,
1926                       SND_PCM_STREAM_CAPTURE,
1927                       &period_frames, &buffer_frames, tsched_frames,
1928                       &b, &d, false)))
1929             goto fail;
1930     }
1931
1932     pa_assert(u->device_name);
1933     pa_log_info("Successfully opened device %s.", u->device_name);
1934
1935     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1936         pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1937         goto fail;
1938     }
1939
1940     if (mapping)
1941         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1942
1943     if (use_mmap && !b) {
1944         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1945         u->use_mmap = use_mmap = false;
1946     }
1947
1948     if (use_tsched && (!b || !d)) {
1949         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1950         u->use_tsched = use_tsched = false;
1951     }
1952
1953     if (u->use_mmap)
1954         pa_log_info("Successfully enabled mmap() mode.");
1955
1956     if (u->use_tsched) {
1957         pa_log_info("Successfully enabled timer-based scheduling mode.");
1958         if (u->fixed_latency_range)
1959             pa_log_info("Disabling latency range changes on overrun");
1960     }
1961
1962     u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
1963     if (!u->rates) {
1964         pa_log_error("Failed to find any supported sample rates.");
1965         goto fail;
1966     }
1967
1968     /* ALSA might tweak the sample spec, so recalculate the frame size */
1969     frame_size = pa_frame_size(&ss);
1970
1971     if (!u->ucm_context)
1972         find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1973
1974     pa_source_new_data_init(&data);
1975     data.driver = driver;
1976     data.module = m;
1977     data.card = card;
1978     set_source_name(&data, ma, dev_id, u->device_name, mapping);
1979
1980     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1981      * variable instead of using &data.namereg_fail directly, because
1982      * data.namereg_fail is a bitfield and taking the address of a bitfield
1983      * variable is impossible. */
1984     namereg_fail = data.namereg_fail;
1985     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1986         pa_log("Failed to parse namereg_fail argument.");
1987         pa_source_new_data_done(&data);
1988         goto fail;
1989     }
1990     data.namereg_fail = namereg_fail;
1991
1992     pa_source_new_data_set_sample_spec(&data, &ss);
1993     pa_source_new_data_set_channel_map(&data, &map);
1994     pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
1995
1996     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1997     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1998     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1999     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2000     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2001
2002     if (mapping) {
2003         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2004         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2005
2006         while ((key = pa_proplist_iterate(mapping->proplist, &state)))
2007             pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
2008     }
2009
2010     pa_alsa_init_description(data.proplist, card);
2011
2012     if (u->control_device)
2013         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2014
2015     if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2016         pa_log("Invalid properties");
2017         pa_source_new_data_done(&data);
2018         goto fail;
2019     }
2020
2021     if (u->ucm_context)
2022         pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, false, card);
2023     else if (u->mixer_path_set)
2024         pa_alsa_add_ports(&data, u->mixer_path_set, card);
2025
2026     u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
2027     volume_is_set = data.volume_is_set;
2028     mute_is_set = data.muted_is_set;
2029     pa_source_new_data_done(&data);
2030
2031     if (!u->source) {
2032         pa_log("Failed to create source object");
2033         goto fail;
2034     }
2035
2036     if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2037                                  &u->source->thread_info.volume_change_safety_margin) < 0) {
2038         pa_log("Failed to parse deferred_volume_safety_margin parameter");
2039         goto fail;
2040     }
2041
2042     if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2043                                  &u->source->thread_info.volume_change_extra_delay) < 0) {
2044         pa_log("Failed to parse deferred_volume_extra_delay parameter");
2045         goto fail;
2046     }
2047
2048     u->source->parent.process_msg = source_process_msg;
2049     if (u->use_tsched)
2050         u->source->update_requested_latency = source_update_requested_latency_cb;
2051     u->source->set_state_in_main_thread = source_set_state_in_main_thread_cb;
2052     u->source->set_state_in_io_thread = source_set_state_in_io_thread_cb;
2053     if (u->ucm_context)
2054         u->source->set_port = source_set_port_ucm_cb;
2055     else
2056         u->source->set_port = source_set_port_cb;
2057     if (u->source->alternate_sample_rate)
2058         u->source->reconfigure = source_reconfigure_cb;
2059     u->source->userdata = u;
2060
2061     pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
2062     pa_source_set_rtpoll(u->source, u->rtpoll);
2063
2064     u->frame_size = frame_size;
2065     u->frames_per_block = pa_mempool_block_size_max(m->core->mempool) / frame_size;
2066     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2067     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2068     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
2069
2070     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2071                 (double) u->hwbuf_size / (double) u->fragment_size,
2072                 (long unsigned) u->fragment_size,
2073                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2074                 (long unsigned) u->hwbuf_size,
2075                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2076
2077     if (u->use_tsched) {
2078         u->tsched_watermark_ref = tsched_watermark;
2079         reset_watermark(u, u->tsched_watermark_ref, &ss, false);
2080     }
2081     else
2082         pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
2083
2084     reserve_update(u);
2085
2086     if (update_sw_params(u) < 0)
2087         goto fail;
2088
2089     if (u->ucm_context) {
2090         if (u->source->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, false) < 0)
2091             goto fail;
2092     } else if (setup_mixer(u, ignore_dB) < 0)
2093         goto fail;
2094
2095     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2096
2097     thread_name = pa_sprintf_malloc("alsa-source-%s", pa_strnull(pa_proplist_gets(u->source->proplist, "alsa.id")));
2098     if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
2099         pa_log("Failed to create thread.");
2100         goto fail;
2101     }
2102     pa_xfree(thread_name);
2103     thread_name = NULL;
2104
2105     /* Get initial mixer settings */
2106     if (volume_is_set) {
2107         if (u->source->set_volume)
2108             u->source->set_volume(u->source);
2109     } else {
2110         if (u->source->get_volume)
2111             u->source->get_volume(u->source);
2112     }
2113
2114     if (mute_is_set) {
2115         if (u->source->set_mute)
2116             u->source->set_mute(u->source);
2117     } else {
2118         if (u->source->get_mute) {
2119             bool mute;
2120
2121             if (u->source->get_mute(u->source, &mute) >= 0)
2122                 pa_source_set_mute(u->source, mute, false);
2123         }
2124     }
2125
2126     if ((volume_is_set || mute_is_set) && u->source->write_volume)
2127         u->source->write_volume(u->source);
2128
2129     pa_source_put(u->source);
2130
2131     if (profile_set)
2132         pa_alsa_profile_set_free(profile_set);
2133
2134     return u->source;
2135
2136 fail:
2137     pa_xfree(thread_name);
2138
2139     if (u)
2140         userdata_free(u);
2141
2142     if (profile_set)
2143         pa_alsa_profile_set_free(profile_set);
2144
2145     return NULL;
2146 }
2147
2148 static void userdata_free(struct userdata *u) {
2149     pa_assert(u);
2150
2151     if (u->source)
2152         pa_source_unlink(u->source);
2153
2154     if (u->thread) {
2155         pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2156         pa_thread_free(u->thread);
2157     }
2158
2159     pa_thread_mq_done(&u->thread_mq);
2160
2161     if (u->source)
2162         pa_source_unref(u->source);
2163
2164     if (u->mixer_pd)
2165         pa_alsa_mixer_pdata_free(u->mixer_pd);
2166
2167     if (u->alsa_rtpoll_item)
2168         pa_rtpoll_item_free(u->alsa_rtpoll_item);
2169
2170     if (u->rtpoll)
2171         pa_rtpoll_free(u->rtpoll);
2172
2173     if (u->pcm_handle) {
2174         snd_pcm_drop(u->pcm_handle);
2175         snd_pcm_close(u->pcm_handle);
2176     }
2177
2178     if (u->mixer_fdl)
2179         pa_alsa_fdlist_free(u->mixer_fdl);
2180
2181     if (u->mixer_path && !u->mixer_path_set)
2182         pa_alsa_path_free(u->mixer_path);
2183
2184     if (u->mixer_handle)
2185         snd_mixer_close(u->mixer_handle);
2186
2187     if (u->smoother)
2188         pa_smoother_free(u->smoother);
2189
2190     if (u->rates)
2191         pa_xfree(u->rates);
2192
2193     reserve_done(u);
2194     monitor_done(u);
2195
2196     pa_xfree(u->device_name);
2197     pa_xfree(u->control_device);
2198     pa_xfree(u->paths_dir);
2199     pa_xfree(u);
2200 }
2201
2202 void pa_alsa_source_free(pa_source *s) {
2203     struct userdata *u;
2204
2205     pa_source_assert_ref(s);
2206     pa_assert_se(u = s->userdata);
2207
2208     userdata_free(u);
2209 }