6bec188eae8102bc4cd92dcff52fc6044c65d88b
[platform/upstream/pulseaudio.git] / src / modules / alsa / alsa-source.c
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
19 ***/
20
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24
25 #include <signal.h>
26 #include <stdio.h>
27
28 #include <asoundlib.h>
29
30 #include <pulse/rtclock.h>
31 #include <pulse/timeval.h>
32 #include <pulse/volume.h>
33 #include <pulse/xmalloc.h>
34
35 #include <pulsecore/core.h>
36 #include <pulsecore/i18n.h>
37 #include <pulsecore/module.h>
38 #include <pulsecore/memchunk.h>
39 #include <pulsecore/sink.h>
40 #include <pulsecore/modargs.h>
41 #include <pulsecore/core-rtclock.h>
42 #include <pulsecore/core-util.h>
43 #include <pulsecore/sample-util.h>
44 #include <pulsecore/log.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/thread.h>
47 #include <pulsecore/thread-mq.h>
48 #include <pulsecore/rtpoll.h>
49 #include <pulsecore/time-smoother.h>
50
51 #include <modules/reserve-wrap.h>
52
53 #include "alsa-util.h"
54 #include "alsa-source.h"
55
56 /* #define DEBUG_TIMING */
57
58 #define DEFAULT_DEVICE "default"
59
60 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
61 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
62
63 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
64 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
65 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
66 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
67 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
68 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
69
70 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
71 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
72
73 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
74 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
75
76 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
77 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
78
79 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
80
/* Per-instance state of an ALSA capture source. Owned by the module; freed
 * via userdata_free(). */
struct userdata {
    pa_core *core;        /* core this source lives in */
    pa_module *module;    /* owning module */
    pa_source *source;    /* the source object we drive */

    pa_thread *thread;          /* IO thread */
    pa_thread_mq thread_mq;     /* message queue between main and IO thread */
    pa_rtpoll *rtpoll;          /* poll loop run by the IO thread */

    snd_pcm_t *pcm_handle;      /* ALSA capture PCM; NULL while suspended */

    char *paths_dir;            /* directory with mixer path configs -- TODO confirm (used outside this chunk) */
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume; /* volume as last seen on the hardware mixer */

    unsigned int *rates;        /* presumably the supported sample rates; set outside this chunk */

    /* All of the following sizes are in bytes of the source's sample spec. */
    size_t
        frame_size,                 /* bytes per audio frame */
        fragment_size,              /* ALSA period size */
        hwbuf_size,                 /* ALSA hardware buffer size */
        tsched_watermark,           /* current timer-scheduling wakeup watermark */
        tsched_watermark_ref,
        hwbuf_unused,               /* part of the hw buffer deliberately left unused to honour a latency request */
        min_sleep,                  /* lower bound on sleep length (see fix_min_sleep_wakeup()) */
        min_wakeup,                 /* lower bound on the wakeup margin */
        watermark_inc_step,         /* additive step when growing the watermark */
        watermark_dec_step,         /* additive step when shrinking the watermark */
        watermark_inc_threshold,    /* grow watermark when less than this is left to record */
        watermark_dec_threshold;    /* shrink watermark when more than this is left to record */

    snd_pcm_uframes_t frames_per_block; /* cap so mmap'ed chunks fit a single mempool slot */

    pa_usec_t watermark_dec_not_before; /* earliest time the watermark may shrink again; 0 = not armed */
    pa_usec_t min_latency_ref;          /* min latency remembered for IO-context range resets (see reset_watermark()) */
    pa_usec_t tsched_watermark_usec;    /* tsched_watermark converted to usec */

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    bool first;         /* set right after (re)start/recovery, before the first read */

    pa_rtpoll_item *alsa_rtpoll_item;   /* poll item wrapping the PCM's poll fds */

    pa_smoother *smoother;              /* maps system time to stream time */
    uint64_t read_count;                /* total bytes read from the device */
    pa_usec_t smoother_interval;        /* current interval between smoother updates (grows exponentially) */
    pa_usec_t last_smoother_update;     /* timestamp of the last smoother update */

    pa_reserve_wrapper *reserve;            /* device reservation handle */
    pa_hook_slot *reserve_slot;             /* hook: someone asked us to release the device */
    pa_reserve_monitor_wrapper *monitor;    /* watches whether another app holds the device */
    pa_hook_slot *monitor_slot;             /* hook: busy state of the device changed */

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};
145
146 static void userdata_free(struct userdata *u);
147
148 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
149     pa_assert(r);
150     pa_assert(u);
151
152     pa_log_debug("Suspending source %s, because another application requested us to release the device.", u->source->name);
153
154     if (pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION) < 0)
155         return PA_HOOK_CANCEL;
156
157     return PA_HOOK_OK;
158 }
159
160 static void reserve_done(struct userdata *u) {
161     pa_assert(u);
162
163     if (u->reserve_slot) {
164         pa_hook_slot_free(u->reserve_slot);
165         u->reserve_slot = NULL;
166     }
167
168     if (u->reserve) {
169         pa_reserve_wrapper_unref(u->reserve);
170         u->reserve = NULL;
171     }
172 }
173
174 static void reserve_update(struct userdata *u) {
175     const char *description;
176     pa_assert(u);
177
178     if (!u->source || !u->reserve)
179         return;
180
181     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
182         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
183 }
184
185 static int reserve_init(struct userdata *u, const char *dname) {
186     char *rname;
187
188     pa_assert(u);
189     pa_assert(dname);
190
191     if (u->reserve)
192         return 0;
193
194     if (pa_in_system_mode())
195         return 0;
196
197     if (!(rname = pa_alsa_get_reserve_name(dname)))
198         return 0;
199
200     /* We are resuming, try to lock the device */
201     u->reserve = pa_reserve_wrapper_get(u->core, rname);
202     pa_xfree(rname);
203
204     if (!(u->reserve))
205         return -1;
206
207     reserve_update(u);
208
209     pa_assert(!u->reserve_slot);
210     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
211
212     return 0;
213 }
214
215 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
216     pa_assert(w);
217     pa_assert(u);
218
219     if (PA_PTR_TO_UINT(busy) && !u->reserve) {
220         pa_log_debug("Suspending source %s, because another application is blocking the access to the device.", u->source->name);
221         pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION);
222     } else {
223         pa_log_debug("Resuming source %s, because other applications aren't blocking access to the device any more.", u->source->name);
224         pa_source_suspend(u->source, false, PA_SUSPEND_APPLICATION);
225     }
226
227     return PA_HOOK_OK;
228 }
229
230 static void monitor_done(struct userdata *u) {
231     pa_assert(u);
232
233     if (u->monitor_slot) {
234         pa_hook_slot_free(u->monitor_slot);
235         u->monitor_slot = NULL;
236     }
237
238     if (u->monitor) {
239         pa_reserve_monitor_wrapper_unref(u->monitor);
240         u->monitor = NULL;
241     }
242 }
243
244 static int reserve_monitor_init(struct userdata *u, const char *dname) {
245     char *rname;
246
247     pa_assert(u);
248     pa_assert(dname);
249
250     if (pa_in_system_mode())
251         return 0;
252
253     if (!(rname = pa_alsa_get_reserve_name(dname)))
254         return 0;
255
256     /* We are resuming, try to lock the device */
257     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
258     pa_xfree(rname);
259
260     if (!(u->monitor))
261         return -1;
262
263     pa_assert(!u->monitor_slot);
264     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
265
266     return 0;
267 }
268
269 static void fix_min_sleep_wakeup(struct userdata *u) {
270     size_t max_use, max_use_2;
271
272     pa_assert(u);
273     pa_assert(u->use_tsched);
274
275     max_use = u->hwbuf_size - u->hwbuf_unused;
276     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
277
278     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
279     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
280
281     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
282     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
283 }
284
285 static void fix_tsched_watermark(struct userdata *u) {
286     size_t max_use;
287     pa_assert(u);
288     pa_assert(u->use_tsched);
289
290     max_use = u->hwbuf_size - u->hwbuf_unused;
291
292     if (u->tsched_watermark > max_use - u->min_sleep)
293         u->tsched_watermark = max_use - u->min_sleep;
294
295     if (u->tsched_watermark < u->min_wakeup)
296         u->tsched_watermark = u->min_wakeup;
297
298    u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
299 }
300
301 static void increase_watermark(struct userdata *u) {
302     size_t old_watermark;
303     pa_usec_t old_min_latency, new_min_latency;
304
305     pa_assert(u);
306     pa_assert(u->use_tsched);
307
308     /* First, just try to increase the watermark */
309     old_watermark = u->tsched_watermark;
310     u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
311     fix_tsched_watermark(u);
312
313     if (old_watermark != u->tsched_watermark) {
314         pa_log_info("Increasing wakeup watermark to %0.2f ms",
315                     (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
316         return;
317     }
318
319     /* Hmm, we cannot increase the watermark any further, hence let's
320      raise the latency unless doing so was disabled in
321      configuration */
322     if (u->fixed_latency_range)
323         return;
324
325     old_min_latency = u->source->thread_info.min_latency;
326     new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
327     new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
328
329     if (old_min_latency != new_min_latency) {
330         pa_log_info("Increasing minimal latency to %0.2f ms",
331                     (double) new_min_latency / PA_USEC_PER_MSEC);
332
333         pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
334     }
335
336     /* When we reach this we're officially fucked! */
337 }
338
/* Shrink the wakeup watermark again after a quiet period. Rate limited:
 * a decrease (or a fresh check) re-arms a deadline and nothing happens
 * until TSCHED_WATERMARK_VERIFY_AFTER_USEC has passed. */
static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    /* 0 means the deadline was never armed (pa_usec_t is unsigned, so this
     * effectively tests for == 0): arm it and come back later. */
    if (u->watermark_dec_not_before <= 0)
        goto restart;

    /* Deadline not reached yet: too early to shrink. */
    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    /* Shrink by the configured step, but never below half the current
     * value in a single adjustment. */
    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);

    /* We don't change the latency range*/

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
372
373 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
374     pa_usec_t wm, usec;
375
376     pa_assert(sleep_usec);
377     pa_assert(process_usec);
378
379     pa_assert(u);
380     pa_assert(u->use_tsched);
381
382     usec = pa_source_get_requested_latency_within_thread(u->source);
383
384     if (usec == (pa_usec_t) -1)
385         usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
386
387     wm = u->tsched_watermark_usec;
388
389     if (wm > usec)
390         wm = usec/2;
391
392     *sleep_usec = usec - wm;
393     *process_usec = wm;
394
395 #ifdef DEBUG_TIMING
396     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
397                  (unsigned long) (usec / PA_USEC_PER_MSEC),
398                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
399                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
400 #endif
401 }
402
403 static int try_recover(struct userdata *u, const char *call, int err) {
404     pa_assert(u);
405     pa_assert(call);
406     pa_assert(err < 0);
407
408     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
409
410     pa_assert(err != -EAGAIN);
411
412     if (err == -EPIPE)
413         pa_log_debug("%s: Buffer overrun!", call);
414
415     if (err == -ESTRPIPE)
416         pa_log_debug("%s: System suspended!", call);
417
418     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
419         pa_log("%s: %s", call, pa_alsa_strerror(err));
420         return -1;
421     }
422
423     u->first = true;
424     return 0;
425 }
426
/* Given the bytes currently readable from the device (n_bytes), compute how
 * much room remains in the hw buffer before an overrun, and adapt the
 * tsched watermark accordingly. `on_timeout` tells whether this wakeup came
 * from our own timer. Returns the bytes left to record (0 on overrun). */
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, bool on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    bool overrun = false;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = true;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        bool reset_not_before = true;

        /* Too close to an overrun (or already in one): wake up earlier
         * next time. */
        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = false;

            /* We decrease the watermark only if have actually
             * been woken up by a timeout. If something else woke
             * us up it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        /* In the comfortable middle band: disarm the decrease deadline so
         * the next quiet period starts a fresh countdown. */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}
479
/* Read as much captured audio as possible via the ALSA mmap interface and
 * post it to the source. `polled` indicates poll() reported POLLIN;
 * `on_timeout` indicates a timer wakeup (enables watermark decrease).
 * On return, *sleep_usec holds the suggested sleep time when timer
 * scheduling is active. Returns 1 if any data was moved, 0 if not, or a
 * negative value on unrecoverable error. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    bool work_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        /* How many frames are ready to be read? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = false;

        /* With timer scheduling, skip reading while we still have plenty of
         * headroom -- unless poll() explicitly woke us for data. */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing available points at a driver bug; warn
             * once per process. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read.\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }

        /* Bail out after a bounded number of outer iterations so the IO
         * thread never spins here indefinitely. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = false;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        /* Inner loop: consume the n_bytes reported available, one mmap
         * window at a time. */
        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
/*             pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                /* -EAGAIN after we already read something just means the
                 * device ran dry; not an error. */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            frames = PA_MIN(frames, u->frames_per_block);

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = false;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'ed region in a fixed (read-only) memblock so it
             * can be posted without copying. */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, true);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = true;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* Suggest sleeping until only the watermark's worth of headroom is
     * left, so the timer wakes us just in time. */
    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
639
640 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
641     int work_done = false;
642     pa_usec_t max_sleep_usec = 0, process_usec = 0;
643     size_t left_to_record;
644     unsigned j = 0;
645
646     pa_assert(u);
647     pa_source_assert_ref(u->source);
648
649     if (u->use_tsched)
650         hw_sleep_time(u, &max_sleep_usec, &process_usec);
651
652     for (;;) {
653         snd_pcm_sframes_t n;
654         size_t n_bytes;
655         int r;
656         bool after_avail = true;
657
658         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
659
660             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
661                 continue;
662
663             return r;
664         }
665
666         n_bytes = (size_t) n * u->frame_size;
667         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
668         on_timeout = false;
669
670         if (u->use_tsched)
671             if (!polled &&
672                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
673                 break;
674
675         if (PA_UNLIKELY(n_bytes <= 0)) {
676
677             if (polled)
678                 PA_ONCE_BEGIN {
679                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
680                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
681                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
682                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
683                            pa_strnull(dn));
684                     pa_xfree(dn);
685                 } PA_ONCE_END;
686
687             break;
688         }
689
690         if (++j > 10) {
691 #ifdef DEBUG_TIMING
692             pa_log_debug("Not filling up, because already too many iterations.");
693 #endif
694
695             break;
696         }
697
698         polled = false;
699
700         for (;;) {
701             void *p;
702             snd_pcm_sframes_t frames;
703             pa_memchunk chunk;
704
705             chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
706
707             frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
708
709             if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
710                 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
711
712 /*             pa_log_debug("%lu frames to read", (unsigned long) n); */
713
714             p = pa_memblock_acquire(chunk.memblock);
715             frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
716             pa_memblock_release(chunk.memblock);
717
718             if (PA_UNLIKELY(frames < 0)) {
719                 pa_memblock_unref(chunk.memblock);
720
721                 if (!after_avail && (int) frames == -EAGAIN)
722                     break;
723
724                 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
725                     continue;
726
727                 return r;
728             }
729
730             if (!after_avail && frames == 0) {
731                 pa_memblock_unref(chunk.memblock);
732                 break;
733             }
734
735             pa_assert(frames > 0);
736             after_avail = false;
737
738             chunk.index = 0;
739             chunk.length = (size_t) frames * u->frame_size;
740
741             pa_source_post(u->source, &chunk);
742             pa_memblock_unref(chunk.memblock);
743
744             work_done = true;
745
746             u->read_count += frames * u->frame_size;
747
748 /*             pa_log_debug("read %lu frames", (unsigned long) frames); */
749
750             if ((size_t) frames * u->frame_size >= n_bytes)
751                 break;
752
753             n_bytes -= (size_t) frames * u->frame_size;
754         }
755     }
756
757     if (u->use_tsched) {
758         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
759         process_usec = u->tsched_watermark_usec;
760
761         if (*sleep_usec > process_usec)
762             *sleep_usec -= process_usec;
763         else
764             *sleep_usec = 0;
765     }
766
767     return work_done ? 1 : 0;
768 }
769
/* Feed a fresh (system time, stream time) sample into the time smoother.
 * Stream time is derived from the total bytes read plus the bytes still
 * sitting in the hw buffer (the capture delay). Updates are rate-limited by
 * smoother_interval, which grows exponentially up to SMOOTHER_MAX_INTERVAL. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;
    snd_htimestamp_t htstamp = { 0, 0 };

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->source->sample_spec, true)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver-provided timestamp of the delay measurement. */
    snd_pcm_status_get_htstamp(status, &htstamp);
    now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Stream position = bytes already read + bytes pending in the buffer. */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
811
812 static int64_t source_get_latency(struct userdata *u) {
813     int64_t delay;
814     pa_usec_t now1, now2;
815
816     pa_assert(u);
817
818     now1 = pa_rtclock_now();
819     now2 = pa_smoother_get(u->smoother, now1);
820
821     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
822
823     return delay;
824 }
825
826 static int build_pollfd(struct userdata *u) {
827     pa_assert(u);
828     pa_assert(u->pcm_handle);
829
830     if (u->alsa_rtpoll_item)
831         pa_rtpoll_item_free(u->alsa_rtpoll_item);
832
833     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
834         return -1;
835
836     return 0;
837 }
838
839 /* Called from IO context */
840 static int suspend(struct userdata *u) {
841     pa_assert(u);
842     pa_assert(u->pcm_handle);
843
844     pa_smoother_pause(u->smoother, pa_rtclock_now());
845
846     /* Let's suspend */
847     snd_pcm_close(u->pcm_handle);
848     u->pcm_handle = NULL;
849
850     if (u->alsa_rtpoll_item) {
851         pa_rtpoll_item_free(u->alsa_rtpoll_item);
852         u->alsa_rtpoll_item = NULL;
853     }
854
855     pa_log_info("Device suspended...");
856
857     return 0;
858 }
859
/* Called from IO context */
/* Recompute hwbuf_unused and the ALSA avail_min from the currently requested
 * latency, then push the software parameters to the device. Returns 0 on
 * success or a negative ALSA error code. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency is left unused. */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* The usable buffer changed, so the derived limits must follow. */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* With timer scheduling, only wake us via poll() once the planned
         * sleep time's worth of frames has accumulated. */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}
912
/* Called from IO Context on unsuspend or from main thread when creating source */
/* Reinitialize the tsched watermark (converting `tsched_watermark`, given in
 * sample spec `ss`, to the source's sample spec), its derived step/threshold
 * values, and the source's latency range. `in_thread` selects the IO-context
 * vs. main-thread variant of the latency-range setter. */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            bool in_thread) {
    u->tsched_watermark = pa_convert_size(tsched_watermark, ss, &u->source->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

    /* Clamp the freshly converted values into their valid windows. */
    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_source_set_latency_range_within_thread(u->source,
                                                  u->min_latency_ref,
                                                  pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_source_set_latency_within_thread,
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->source->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
}
945
/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    bool b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Reopen the PCM device that was closed on suspend. The NO_AUTO_*
     * flags prevent alsa-lib plugins from silently converting the
     * stream parameters behind our back. */
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Ask for exactly the configuration we had before suspending */
    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, true)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* pa_alsa_set_hw_params() may have negotiated different settings;
     * if anything deviates from the pre-suspend configuration we must
     * abort, since the source object cannot change its spec here. */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Reset the timing smoother — the clock relationship from before the
     * suspend is no longer valid. */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    /* Makes the IO thread call snd_pcm_start() on the next iteration */
    u->first = true;

    /* reset the watermark to the value defined when source was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, true);

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1027
/* Called from IO context */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            int64_t r = 0;

            /* Only a live PCM handle can report a latency; when
             * suspended we answer 0. */
            if (u->pcm_handle)
                r = source_get_latency(u);

            *((int64_t*) data) = r;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_STATE:

            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SOURCE_SUSPENDED: {
                    int r;

                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING: {
                    int r;

                    /* First transition out of INIT: set up polling on the
                     * PCM descriptors. */
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    /* Coming back from suspend: reopen and reconfigure the
                     * device. */
                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    ;
            }

            break;
    }

    /* Fall through to the generic source message handler */
    return pa_source_process_msg(o, code, data, offset, chunk);
}
1088
1089 /* Called from main context */
1090 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1091     pa_source_state_t old_state;
1092     struct userdata *u;
1093
1094     pa_source_assert_ref(s);
1095     pa_assert_se(u = s->userdata);
1096
1097     old_state = pa_source_get_state(u->source);
1098
1099     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1100         reserve_done(u);
1101     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1102         if (reserve_init(u, u->device_name) < 0)
1103             return -PA_ERR_BUSY;
1104
1105     return 0;
1106 }
1107
1108 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1109     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1110
1111     pa_assert(u);
1112     pa_assert(u->mixer_handle);
1113
1114     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1115         return 0;
1116
1117     if (!PA_SOURCE_IS_LINKED(u->source->state))
1118         return 0;
1119
1120     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1121         pa_source_set_mixer_dirty(u->source, true);
1122         return 0;
1123     }
1124
1125     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1126         pa_source_get_volume(u->source, true);
1127         pa_source_get_mute(u->source, true);
1128     }
1129
1130     return 0;
1131 }
1132
1133 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1134     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1135
1136     pa_assert(u);
1137     pa_assert(u->mixer_handle);
1138
1139     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1140         return 0;
1141
1142     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1143         pa_source_set_mixer_dirty(u->source, true);
1144         return 0;
1145     }
1146
1147     if (mask & SND_CTL_EVENT_MASK_VALUE)
1148         pa_source_update_volume_and_mute(u->source);
1149
1150     return 0;
1151 }
1152
1153 static void source_get_volume_cb(pa_source *s) {
1154     struct userdata *u = s->userdata;
1155     pa_cvolume r;
1156     char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1157
1158     pa_assert(u);
1159     pa_assert(u->mixer_path);
1160     pa_assert(u->mixer_handle);
1161
1162     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1163         return;
1164
1165     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1166     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1167
1168     pa_log_debug("Read hardware volume: %s",
1169                  pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));
1170
1171     if (pa_cvolume_equal(&u->hardware_volume, &r))
1172         return;
1173
1174     s->real_volume = u->hardware_volume = r;
1175
1176     /* Hmm, so the hardware volume changed, let's reset our software volume */
1177     if (u->mixer_path->has_dB)
1178         pa_source_set_soft_volume(s, NULL);
1179 }
1180
/* Write the requested volume to the hardware mixer and compute the
 * residual software volume needed to match the request exactly. */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
    bool deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* With deferred volume the actual write happens later from the IO
     * thread (source_write_volume_cb); here we only round. */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        bool accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
        pa_log_debug("Got hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        /* Without dB information we cannot compute a correction factor. */
        pa_log_debug("Wrote hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1237
/* Deferred-volume write path: called from the IO thread to push the
 * pending hardware volume to the mixer. */
static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        bool accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Compare what was actually written against what was requested;
         * a ratio within VOLUME_ACCURACY of unity counts as a match. */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint_verbose(volume_buf[0],
                                                    sizeof(volume_buf[0]),
                                                    &s->thread_info.current_hw_volume,
                                                    &s->channel_map,
                                                    true),
                         pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
        }
    }
}
1277
1278 static int source_get_mute_cb(pa_source *s, bool *mute) {
1279     struct userdata *u = s->userdata;
1280
1281     pa_assert(u);
1282     pa_assert(u->mixer_path);
1283     pa_assert(u->mixer_handle);
1284
1285     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, mute) < 0)
1286         return -1;
1287
1288     return 0;
1289 }
1290
/* Push the source's current mute state down to the hardware mixer switch. */
static void source_set_mute_cb(pa_source *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}
1300
/* Install (or clear) the volume/mute callbacks on the source depending on
 * what the active mixer path supports, and set up the base volume and
 * step count accordingly. */
static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        pa_source_set_write_volume_callback(u->source, NULL);
        pa_source_set_get_volume_callback(u->source, NULL);
        pa_source_set_set_volume_callback(u->source, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
        pa_source_set_set_volume_callback(u->source, source_set_volume_cb);

        /* Deferred volume requires a dB-capable path, because the write
         * callback relies on soft-volume correction. */
        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_source_set_write_volume_callback(u->source, NULL);

        if (u->mixer_path->has_dB) {
            pa_source_enable_decibel_volume(u->source, true);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Base volume corresponds to the path's maximum dB value, so
             * that 0 dB maps to PA_VOLUME_NORM. */
            u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->source->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
        } else {
            pa_source_enable_decibel_volume(u->source, false);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->source->base_volume = PA_VOLUME_NORM;
            u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_source_set_get_mute_callback(u->source, NULL);
        pa_source_set_set_mute_callback(u->source, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
        pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
1349
/* Port-switch callback for UCM-managed devices: delegate entirely to the
 * UCM context (false selects the capture direction here — TODO confirm
 * against pa_alsa_ucm_set_port's signature). */
static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->ucm_context);

    return pa_alsa_ucm_set_port(u->ucm_context, p, false);
}
1359
/* Port-switch callback for mixer-path-managed devices: activate the
 * path attached to the port, rewire the volume/mute callbacks, and
 * re-apply the current mute and volume through the new path. */
static int source_set_port_cb(pa_source *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    /* pa_assert_se: the assignment must happen even in non-debug builds */
    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);

    mixer_volume_init(u);

    /* Re-apply current state via whichever callbacks mixer_volume_init()
     * just installed. */
    if (s->set_mute)
        s->set_mute(s);
    if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
        if (s->write_volume)
            s->write_volume(s);
    } else {
        if (s->set_volume)
            s->set_volume(s);
    }

    return 0;
}
1387
1388 static void source_update_requested_latency_cb(pa_source *s) {
1389     struct userdata *u = s->userdata;
1390     pa_assert(u);
1391     pa_assert(u->use_tsched); /* only when timer scheduling is used
1392                                * we can dynamically adjust the
1393                                * latency */
1394
1395     if (!u->pcm_handle)
1396         return;
1397
1398     update_sw_params(u);
1399 }
1400
1401 static int source_update_rate_cb(pa_source *s, uint32_t rate) {
1402     struct userdata *u = s->userdata;
1403     int i;
1404     bool supported = false;
1405
1406     pa_assert(u);
1407
1408     for (i = 0; u->rates[i]; i++) {
1409         if (u->rates[i] == rate) {
1410             supported = true;
1411             break;
1412         }
1413     }
1414
1415     if (!supported) {
1416         pa_log_info("Source does not support sample rate of %d Hz", rate);
1417         return -1;
1418     }
1419
1420     if (!PA_SOURCE_IS_OPENED(s->state)) {
1421         pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1422         u->source->sample_spec.rate = rate;
1423         return 0;
1424     }
1425
1426     return -1;
1427 }
1428
/* Main loop of the dedicated IO thread: reads capture data, manages the
 * timer-based wakeup schedule, applies deferred volume changes and
 * handles poll events / PCM error recovery. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        /* real_sleep is only written and read when rtpoll_sleep > 0 */
        pa_usec_t rtpoll_sleep = 0, real_sleep;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Read some data and pass it to the sources */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* First iteration after creation/unsuspend: actually start
             * the PCM stream. */
            if (u->first) {
                pa_log_info("Starting capture.");
                snd_pcm_start(u->pcm_handle);

                pa_smoother_resume(u->smoother, pa_rtclock_now(), true);

                u->first = false;
            }

            if (u->use_mmap)
                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
            else
                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);

            if (work_done < 0)
                goto fail;

/*             pa_log_debug("work_done = %i", work_done); */

            if (work_done)
                update_smoother(u);

            if (u->use_tsched) {
                pa_usec_t cusec;

                /* OK, the capture buffer is now empty, let's
                 * calculate when to wake up next */

/*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

/*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }
        }

        /* A pending deferred volume change may need an earlier wakeup */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_source_volume_change_apply(u->source, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0) {
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
            /* Remember when we went to sleep so we can measure the real
             * sleep time afterwards. */
            real_sleep = pa_rtclock_now();
        }
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
            goto fail;

        if (rtpoll_sleep > 0) {
            real_sleep = pa_rtclock_now() - real_sleep;
#ifdef DEBUG_TIMING
            pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
                (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
                (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
#endif
            /* Warn if we overslept by more than a watermark's worth —
             * that indicates scheduling trouble. */
            if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
                pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
                    (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
                    (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
        }

        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
            pa_source_volume_change_apply(u->source, NULL);

        /* pa_rtpoll_run() returning 0 means: quit requested */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLIN (e.g. POLLERR after an overrun)
             * requires PCM recovery and a restart of capture. */
            if (revents & ~POLLIN) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = true;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1573
1574 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1575     const char *n;
1576     char *t;
1577
1578     pa_assert(data);
1579     pa_assert(ma);
1580     pa_assert(device_name);
1581
1582     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1583         pa_source_new_data_set_name(data, n);
1584         data->namereg_fail = true;
1585         return;
1586     }
1587
1588     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1589         data->namereg_fail = true;
1590     else {
1591         n = device_id ? device_id : device_name;
1592         data->namereg_fail = false;
1593     }
1594
1595     if (mapping)
1596         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1597     else
1598         t = pa_sprintf_malloc("alsa_input.%s", n);
1599
1600     pa_source_new_data_set_name(data, t);
1601     pa_xfree(t);
1602 }
1603
/* Locate a mixer for the PCM and either synthesize a path from an
 * explicit element name or adopt the mapping's probed input path set.
 * On failure all partially-acquired mixer resources are released. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
    /* Nothing to look for without a mapping or an explicit element */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* Build a one-element path from the user-specified control */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else if (!(u->mixer_path_set = mapping->input_path_set))
        goto fail;

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1640
/* Activate the appropriate mixer path (from the active port, or the
 * first/only available one), initialize volume handling and register
 * mixer-event callbacks if the path(s) expose volume or mute controls.
 * Returns 0 on success (also when there is simply no mixer), -1 on
 * failure to set up event monitoring. */
static int setup_mixer(struct userdata *u, bool ignore_dB) {
    bool need_mixer_callback = false;

    pa_assert(u);

    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = true;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        /* Deferred volume events are handled in the IO thread via
         * rtpoll; otherwise they go through the main loop's fd list. */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1716
1717 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1718
1719     struct userdata *u = NULL;
1720     const char *dev_id = NULL, *key, *mod_name;
1721     pa_sample_spec ss;
1722     char *thread_name = NULL;
1723     uint32_t alternate_sample_rate;
1724     pa_channel_map map;
1725     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1726     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1727     size_t frame_size;
1728     bool use_mmap = true, b, use_tsched = true, d, ignore_dB = false, namereg_fail = false, deferred_volume = false, fixed_latency_range = false;
1729     pa_source_new_data data;
1730     bool volume_is_set;
1731     bool mute_is_set;
1732     pa_alsa_profile_set *profile_set = NULL;
1733     void *state = NULL;
1734
1735     pa_assert(m);
1736     pa_assert(ma);
1737
1738     ss = m->core->default_sample_spec;
1739     map = m->core->default_channel_map;
1740
1741     /* Pick sample spec overrides from the mapping, if any */
1742     if (mapping) {
1743         if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
1744             ss.format = mapping->sample_spec.format;
1745         if (mapping->sample_spec.rate != 0)
1746             ss.rate = mapping->sample_spec.rate;
1747         if (mapping->sample_spec.channels != 0) {
1748             ss.channels = mapping->sample_spec.channels;
1749             if (pa_channel_map_valid(&mapping->channel_map))
1750                 pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
1751         }
1752     }
1753
1754     /* Override with modargs if provided */
1755     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1756         pa_log("Failed to parse sample specification and channel map");
1757         goto fail;
1758     }
1759
1760     alternate_sample_rate = m->core->alternate_sample_rate;
1761     if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1762         pa_log("Failed to parse alternate sample rate");
1763         goto fail;
1764     }
1765
1766     frame_size = pa_frame_size(&ss);
1767
1768     nfrags = m->core->default_n_fragments;
1769     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1770     if (frag_size <= 0)
1771         frag_size = (uint32_t) frame_size;
1772     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1773     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1774
1775     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1776         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1777         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1778         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1779         pa_log("Failed to parse buffer metrics");
1780         goto fail;
1781     }
1782
1783     buffer_size = nfrags * frag_size;
1784
1785     period_frames = frag_size/frame_size;
1786     buffer_frames = buffer_size/frame_size;
1787     tsched_frames = tsched_size/frame_size;
1788
1789     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1790         pa_log("Failed to parse mmap argument.");
1791         goto fail;
1792     }
1793
1794     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1795         pa_log("Failed to parse tsched argument.");
1796         goto fail;
1797     }
1798
1799     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1800         pa_log("Failed to parse ignore_dB argument.");
1801         goto fail;
1802     }
1803
1804     deferred_volume = m->core->deferred_volume;
1805     if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1806         pa_log("Failed to parse deferred_volume argument.");
1807         goto fail;
1808     }
1809
1810     if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
1811         pa_log("Failed to parse fixed_latency_range argument.");
1812         goto fail;
1813     }
1814
1815     use_tsched = pa_alsa_may_tsched(use_tsched);
1816
1817     u = pa_xnew0(struct userdata, 1);
1818     u->core = m->core;
1819     u->module = m;
1820     u->use_mmap = use_mmap;
1821     u->use_tsched = use_tsched;
1822     u->deferred_volume = deferred_volume;
1823     u->fixed_latency_range = fixed_latency_range;
1824     u->first = true;
1825     u->rtpoll = pa_rtpoll_new();
1826
1827     if (pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll) < 0) {
1828         pa_log("pa_thread_mq_init() failed.");
1829         goto fail;
1830     }
1831
1832     u->smoother = pa_smoother_new(
1833             SMOOTHER_ADJUST_USEC,
1834             SMOOTHER_WINDOW_USEC,
1835             true,
1836             true,
1837             5,
1838             pa_rtclock_now(),
1839             true);
1840     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1841
1842     /* use ucm */
1843     if (mapping && mapping->ucm_context.ucm)
1844         u->ucm_context = &mapping->ucm_context;
1845
1846     dev_id = pa_modargs_get_value(
1847             ma, "device_id",
1848             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1849
1850     u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
1851
1852     if (reserve_init(u, dev_id) < 0)
1853         goto fail;
1854
1855     if (reserve_monitor_init(u, dev_id) < 0)
1856         goto fail;
1857
1858     b = use_mmap;
1859     d = use_tsched;
1860
1861     /* Force ALSA to reread its configuration if module-alsa-card didn't
1862      * do it for us. This matters if our device was hot-plugged after ALSA
1863      * has already read its configuration - see
1864      * https://bugs.freedesktop.org/show_bug.cgi?id=54029
1865      */
1866
1867     if (!card)
1868         snd_config_update_free_global();
1869
1870     if (mapping) {
1871
1872         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1873             pa_log("device_id= not set");
1874             goto fail;
1875         }
1876
1877         if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
1878             if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
1879                 pa_log("Failed to enable ucm modifier %s", mod_name);
1880             else
1881                 pa_log_debug("Enabled ucm modifier %s", mod_name);
1882         }
1883
1884         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1885                       dev_id,
1886                       &u->device_name,
1887                       &ss, &map,
1888                       SND_PCM_STREAM_CAPTURE,
1889                       &period_frames, &buffer_frames, tsched_frames,
1890                       &b, &d, mapping)))
1891             goto fail;
1892
1893     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1894
1895         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1896             goto fail;
1897
1898         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1899                       dev_id,
1900                       &u->device_name,
1901                       &ss, &map,
1902                       SND_PCM_STREAM_CAPTURE,
1903                       &period_frames, &buffer_frames, tsched_frames,
1904                       &b, &d, profile_set, &mapping)))
1905             goto fail;
1906
1907     } else {
1908
1909         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1910                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1911                       &u->device_name,
1912                       &ss, &map,
1913                       SND_PCM_STREAM_CAPTURE,
1914                       &period_frames, &buffer_frames, tsched_frames,
1915                       &b, &d, false)))
1916             goto fail;
1917     }
1918
1919     pa_assert(u->device_name);
1920     pa_log_info("Successfully opened device %s.", u->device_name);
1921
1922     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1923         pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1924         goto fail;
1925     }
1926
1927     if (mapping)
1928         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1929
1930     if (use_mmap && !b) {
1931         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1932         u->use_mmap = use_mmap = false;
1933     }
1934
1935     if (use_tsched && (!b || !d)) {
1936         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1937         u->use_tsched = use_tsched = false;
1938     }
1939
1940     if (u->use_mmap)
1941         pa_log_info("Successfully enabled mmap() mode.");
1942
1943     if (u->use_tsched) {
1944         pa_log_info("Successfully enabled timer-based scheduling mode.");
1945         if (u->fixed_latency_range)
1946             pa_log_info("Disabling latency range changes on overrun");
1947     }
1948
1949     u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
1950     if (!u->rates) {
1951         pa_log_error("Failed to find any supported sample rates.");
1952         goto fail;
1953     }
1954
1955     /* ALSA might tweak the sample spec, so recalculate the frame size */
1956     frame_size = pa_frame_size(&ss);
1957
1958     if (!u->ucm_context)
1959         find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1960
1961     pa_source_new_data_init(&data);
1962     data.driver = driver;
1963     data.module = m;
1964     data.card = card;
1965     set_source_name(&data, ma, dev_id, u->device_name, mapping);
1966
1967     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1968      * variable instead of using &data.namereg_fail directly, because
1969      * data.namereg_fail is a bitfield and taking the address of a bitfield
1970      * variable is impossible. */
1971     namereg_fail = data.namereg_fail;
1972     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1973         pa_log("Failed to parse namereg_fail argument.");
1974         pa_source_new_data_done(&data);
1975         goto fail;
1976     }
1977     data.namereg_fail = namereg_fail;
1978
1979     pa_source_new_data_set_sample_spec(&data, &ss);
1980     pa_source_new_data_set_channel_map(&data, &map);
1981     pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
1982
1983     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1984     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1985     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1986     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1987     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1988
1989     if (mapping) {
1990         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1991         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1992
1993         while ((key = pa_proplist_iterate(mapping->proplist, &state)))
1994             pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
1995     }
1996
1997     pa_alsa_init_description(data.proplist, card);
1998
1999     if (u->control_device)
2000         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2001
2002     if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2003         pa_log("Invalid properties");
2004         pa_source_new_data_done(&data);
2005         goto fail;
2006     }
2007
2008     if (u->ucm_context)
2009         pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, false, card);
2010     else if (u->mixer_path_set)
2011         pa_alsa_add_ports(&data, u->mixer_path_set, card);
2012
2013     u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
2014     volume_is_set = data.volume_is_set;
2015     mute_is_set = data.muted_is_set;
2016     pa_source_new_data_done(&data);
2017
2018     if (!u->source) {
2019         pa_log("Failed to create source object");
2020         goto fail;
2021     }
2022
2023     if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2024                                  &u->source->thread_info.volume_change_safety_margin) < 0) {
2025         pa_log("Failed to parse deferred_volume_safety_margin parameter");
2026         goto fail;
2027     }
2028
2029     if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2030                                  &u->source->thread_info.volume_change_extra_delay) < 0) {
2031         pa_log("Failed to parse deferred_volume_extra_delay parameter");
2032         goto fail;
2033     }
2034
2035     u->source->parent.process_msg = source_process_msg;
2036     if (u->use_tsched)
2037         u->source->update_requested_latency = source_update_requested_latency_cb;
2038     u->source->set_state = source_set_state_cb;
2039     if (u->ucm_context)
2040         u->source->set_port = source_set_port_ucm_cb;
2041     else
2042         u->source->set_port = source_set_port_cb;
2043     if (u->source->alternate_sample_rate)
2044         u->source->update_rate = source_update_rate_cb;
2045     u->source->userdata = u;
2046
2047     pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
2048     pa_source_set_rtpoll(u->source, u->rtpoll);
2049
2050     u->frame_size = frame_size;
2051     u->frames_per_block = pa_mempool_block_size_max(m->core->mempool) / frame_size;
2052     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2053     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2054     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
2055
2056     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2057                 (double) u->hwbuf_size / (double) u->fragment_size,
2058                 (long unsigned) u->fragment_size,
2059                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2060                 (long unsigned) u->hwbuf_size,
2061                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2062
2063     if (u->use_tsched) {
2064         u->tsched_watermark_ref = tsched_watermark;
2065         reset_watermark(u, u->tsched_watermark_ref, &ss, false);
2066     }
2067     else
2068         pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
2069
2070     reserve_update(u);
2071
2072     if (update_sw_params(u) < 0)
2073         goto fail;
2074
2075     if (u->ucm_context) {
2076         if (u->source->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, false) < 0)
2077             goto fail;
2078     } else if (setup_mixer(u, ignore_dB) < 0)
2079         goto fail;
2080
2081     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2082
2083     thread_name = pa_sprintf_malloc("alsa-source-%s", pa_strnull(pa_proplist_gets(u->source->proplist, "alsa.id")));
2084     if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
2085         pa_log("Failed to create thread.");
2086         goto fail;
2087     }
2088     pa_xfree(thread_name);
2089     thread_name = NULL;
2090
2091     /* Get initial mixer settings */
2092     if (volume_is_set) {
2093         if (u->source->set_volume)
2094             u->source->set_volume(u->source);
2095     } else {
2096         if (u->source->get_volume)
2097             u->source->get_volume(u->source);
2098     }
2099
2100     if (mute_is_set) {
2101         if (u->source->set_mute)
2102             u->source->set_mute(u->source);
2103     } else {
2104         if (u->source->get_mute) {
2105             bool mute;
2106
2107             if (u->source->get_mute(u->source, &mute) >= 0)
2108                 pa_source_set_mute(u->source, mute, false);
2109         }
2110     }
2111
2112     if ((volume_is_set || mute_is_set) && u->source->write_volume)
2113         u->source->write_volume(u->source);
2114
2115     pa_source_put(u->source);
2116
2117     if (profile_set)
2118         pa_alsa_profile_set_free(profile_set);
2119
2120     return u->source;
2121
2122 fail:
2123     pa_xfree(thread_name);
2124
2125     if (u)
2126         userdata_free(u);
2127
2128     if (profile_set)
2129         pa_alsa_profile_set_free(profile_set);
2130
2131     return NULL;
2132 }
2133
2134 static void userdata_free(struct userdata *u) {
2135     pa_assert(u);
2136
2137     if (u->source)
2138         pa_source_unlink(u->source);
2139
2140     if (u->thread) {
2141         pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2142         pa_thread_free(u->thread);
2143     }
2144
2145     pa_thread_mq_done(&u->thread_mq);
2146
2147     if (u->source)
2148         pa_source_unref(u->source);
2149
2150     if (u->mixer_pd)
2151         pa_alsa_mixer_pdata_free(u->mixer_pd);
2152
2153     if (u->alsa_rtpoll_item)
2154         pa_rtpoll_item_free(u->alsa_rtpoll_item);
2155
2156     if (u->rtpoll)
2157         pa_rtpoll_free(u->rtpoll);
2158
2159     if (u->pcm_handle) {
2160         snd_pcm_drop(u->pcm_handle);
2161         snd_pcm_close(u->pcm_handle);
2162     }
2163
2164     if (u->mixer_fdl)
2165         pa_alsa_fdlist_free(u->mixer_fdl);
2166
2167     if (u->mixer_path && !u->mixer_path_set)
2168         pa_alsa_path_free(u->mixer_path);
2169
2170     if (u->mixer_handle)
2171         snd_mixer_close(u->mixer_handle);
2172
2173     if (u->smoother)
2174         pa_smoother_free(u->smoother);
2175
2176     if (u->rates)
2177         pa_xfree(u->rates);
2178
2179     reserve_done(u);
2180     monitor_done(u);
2181
2182     pa_xfree(u->device_name);
2183     pa_xfree(u->control_device);
2184     pa_xfree(u->paths_dir);
2185     pa_xfree(u);
2186 }
2187
2188 void pa_alsa_source_free(pa_source *s) {
2189     struct userdata *u;
2190
2191     pa_source_assert_ref(s);
2192     pa_assert_se(u = s->userdata);
2193
2194     userdata_free(u);
2195 }