2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
28 #include <asoundlib.h>
30 #include <pulse/rtclock.h>
31 #include <pulse/timeval.h>
32 #include <pulse/volume.h>
33 #include <pulse/xmalloc.h>
35 #include <pulsecore/core.h>
36 #include <pulsecore/i18n.h>
37 #include <pulsecore/module.h>
38 #include <pulsecore/memchunk.h>
39 #include <pulsecore/sink.h>
40 #include <pulsecore/modargs.h>
41 #include <pulsecore/core-rtclock.h>
42 #include <pulsecore/core-util.h>
43 #include <pulsecore/sample-util.h>
44 #include <pulsecore/log.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/thread.h>
47 #include <pulsecore/thread-mq.h>
48 #include <pulsecore/rtpoll.h>
49 #include <pulsecore/time-smoother.h>
51 #include <modules/reserve-wrap.h>
53 #include "alsa-util.h"
54 #include "alsa-source.h"
56 /* #define DEBUG_TIMING */
58 #define DEFAULT_DEVICE "default"
60 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
61 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
63 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
64 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
65 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
66 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
67 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
68 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
70 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
71 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
73 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
74 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
76 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
77 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
79 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
87 pa_thread_mq thread_mq;
90 snd_pcm_t *pcm_handle;
93 pa_alsa_fdlist *mixer_fdl;
94 pa_alsa_mixer_pdata *mixer_pd;
95 snd_mixer_t *mixer_handle;
96 pa_alsa_path_set *mixer_path_set;
97 pa_alsa_path *mixer_path;
99 pa_cvolume hardware_volume;
108 tsched_watermark_ref,
114 watermark_inc_threshold,
115 watermark_dec_threshold;
117 snd_pcm_uframes_t frames_per_block;
119 pa_usec_t watermark_dec_not_before;
120 pa_usec_t min_latency_ref;
121 pa_usec_t tsched_watermark_usec;
123 char *device_name; /* name of the PCM device */
124 char *control_device; /* name of the control device */
126 bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
130 pa_rtpoll_item *alsa_rtpoll_item;
132 pa_smoother *smoother;
134 pa_usec_t smoother_interval;
135 pa_usec_t last_smoother_update;
137 pa_reserve_wrapper *reserve;
138 pa_hook_slot *reserve_slot;
139 pa_reserve_monitor_wrapper *monitor;
140 pa_hook_slot *monitor_slot;
143 pa_alsa_ucm_mapping_context *ucm_context;
146 static void userdata_free(struct userdata *u);
148 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
152 pa_log_debug("Suspending source %s, because another application requested us to release the device.", u->source->name);
154 if (pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION) < 0)
155 return PA_HOOK_CANCEL;
160 static void reserve_done(struct userdata *u) {
163 if (u->reserve_slot) {
164 pa_hook_slot_free(u->reserve_slot);
165 u->reserve_slot = NULL;
169 pa_reserve_wrapper_unref(u->reserve);
174 static void reserve_update(struct userdata *u) {
175 const char *description;
178 if (!u->source || !u->reserve)
181 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
182 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
185 static int reserve_init(struct userdata *u, const char *dname) {
194 if (pa_in_system_mode())
197 if (!(rname = pa_alsa_get_reserve_name(dname)))
200 /* We are resuming, try to lock the device */
201 u->reserve = pa_reserve_wrapper_get(u->core, rname);
209 pa_assert(!u->reserve_slot);
210 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
215 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
219 if (PA_PTR_TO_UINT(busy) && !u->reserve) {
220 pa_log_debug("Suspending source %s, because another application is blocking the access to the device.", u->source->name);
221 pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION);
223 pa_log_debug("Resuming source %s, because other applications aren't blocking access to the device any more.", u->source->name);
224 pa_source_suspend(u->source, false, PA_SUSPEND_APPLICATION);
230 static void monitor_done(struct userdata *u) {
233 if (u->monitor_slot) {
234 pa_hook_slot_free(u->monitor_slot);
235 u->monitor_slot = NULL;
239 pa_reserve_monitor_wrapper_unref(u->monitor);
244 static int reserve_monitor_init(struct userdata *u, const char *dname) {
250 if (pa_in_system_mode())
253 if (!(rname = pa_alsa_get_reserve_name(dname)))
256 /* We are resuming, try to lock the device */
257 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
263 pa_assert(!u->monitor_slot);
264 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
269 static void fix_min_sleep_wakeup(struct userdata *u) {
270 size_t max_use, max_use_2;
273 pa_assert(u->use_tsched);
275 max_use = u->hwbuf_size - u->hwbuf_unused;
276 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
278 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
279 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
281 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
282 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
285 static void fix_tsched_watermark(struct userdata *u) {
288 pa_assert(u->use_tsched);
290 max_use = u->hwbuf_size - u->hwbuf_unused;
292 if (u->tsched_watermark > max_use - u->min_sleep)
293 u->tsched_watermark = max_use - u->min_sleep;
295 if (u->tsched_watermark < u->min_wakeup)
296 u->tsched_watermark = u->min_wakeup;
298 u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
301 static void increase_watermark(struct userdata *u) {
302 size_t old_watermark;
303 pa_usec_t old_min_latency, new_min_latency;
306 pa_assert(u->use_tsched);
308 /* First, just try to increase the watermark */
309 old_watermark = u->tsched_watermark;
310 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
311 fix_tsched_watermark(u);
313 if (old_watermark != u->tsched_watermark) {
314 pa_log_info("Increasing wakeup watermark to %0.2f ms",
315 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
319 /* Hmm, we cannot increase the watermark any further, hence let's
320 raise the latency unless doing so was disabled in
322 if (u->fixed_latency_range)
325 old_min_latency = u->source->thread_info.min_latency;
326 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
327 new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
329 if (old_min_latency != new_min_latency) {
330 pa_log_info("Increasing minimal latency to %0.2f ms",
331 (double) new_min_latency / PA_USEC_PER_MSEC);
333 pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
336 /* When we reach this we're officially fucked! */
339 static void decrease_watermark(struct userdata *u) {
340 size_t old_watermark;
344 pa_assert(u->use_tsched);
346 now = pa_rtclock_now();
348 if (u->watermark_dec_not_before <= 0)
351 if (u->watermark_dec_not_before > now)
354 old_watermark = u->tsched_watermark;
356 if (u->tsched_watermark < u->watermark_dec_step)
357 u->tsched_watermark = u->tsched_watermark / 2;
359 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
361 fix_tsched_watermark(u);
363 if (old_watermark != u->tsched_watermark)
364 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
365 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
367 /* We don't change the latency range*/
370 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
373 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
376 pa_assert(sleep_usec);
377 pa_assert(process_usec);
380 pa_assert(u->use_tsched);
382 usec = pa_source_get_requested_latency_within_thread(u->source);
384 if (usec == (pa_usec_t) -1)
385 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
387 wm = u->tsched_watermark_usec;
392 *sleep_usec = usec - wm;
396 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
397 (unsigned long) (usec / PA_USEC_PER_MSEC),
398 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
399 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
403 static int try_recover(struct userdata *u, const char *call, int err) {
408 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
410 pa_assert(err != -EAGAIN);
413 pa_log_debug("%s: Buffer overrun!", call);
415 if (err == -ESTRPIPE)
416 pa_log_debug("%s: System suspended!", call);
418 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
419 pa_log("%s: %s", call, pa_alsa_strerror(err));
427 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, bool on_timeout) {
428 size_t left_to_record;
429 size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
430 bool overrun = false;
432 /* We use <= instead of < for this check here because an overrun
433 * only happens after the last sample was processed, not already when
434 * it is removed from the buffer. This is particularly important
435 * when block transfer is used. */
437 if (n_bytes <= rec_space)
438 left_to_record = rec_space - n_bytes;
441 /* We got a dropout. What a mess! */
449 if (pa_log_ratelimit(PA_LOG_INFO))
450 pa_log_info("Overrun!");
454 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
458 bool reset_not_before = true;
460 if (overrun || left_to_record < u->watermark_inc_threshold)
461 increase_watermark(u);
462 else if (left_to_record > u->watermark_dec_threshold) {
463 reset_not_before = false;
465 /* We decrease the watermark only if have actually
466 * been woken up by a timeout. If something else woke
467 * us up it's too easy to fulfill the deadlines... */
470 decrease_watermark(u);
473 if (reset_not_before)
474 u->watermark_dec_not_before = 0;
477 return left_to_record;
480 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
481 bool work_done = false;
482 pa_usec_t max_sleep_usec = 0, process_usec = 0;
483 size_t left_to_record;
487 pa_source_assert_ref(u->source);
490 hw_sleep_time(u, &max_sleep_usec, &process_usec);
496 bool after_avail = true;
498 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
500 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
506 n_bytes = (size_t) n * u->frame_size;
509 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
512 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
517 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
519 pa_log_debug("Not reading, because too early.");
524 if (PA_UNLIKELY(n_bytes <= 0)) {
528 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
529 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read.\n"
530 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
531 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
537 pa_log_debug("Not reading, because not necessary.");
544 pa_log_debug("Not filling up, because already too many iterations.");
553 pa_log_debug("Reading");
560 const snd_pcm_channel_area_t *areas;
561 snd_pcm_uframes_t offset, frames;
562 snd_pcm_sframes_t sframes;
564 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
565 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
567 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
569 if (!after_avail && err == -EAGAIN)
572 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
578 /* Make sure that if these memblocks need to be copied they will fit into one slot */
579 frames = PA_MIN(frames, u->frames_per_block);
581 if (!after_avail && frames == 0)
584 pa_assert(frames > 0);
587 /* Check these are multiples of 8 bit */
588 pa_assert((areas[0].first & 7) == 0);
589 pa_assert((areas[0].step & 7) == 0);
591 /* We assume a single interleaved memory buffer */
592 pa_assert((areas[0].first >> 3) == 0);
593 pa_assert((areas[0].step >> 3) == u->frame_size);
595 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
597 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, true);
598 chunk.length = pa_memblock_get_length(chunk.memblock);
601 pa_source_post(u->source, &chunk);
602 pa_memblock_unref_fixed(chunk.memblock);
604 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
606 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
614 u->read_count += frames * u->frame_size;
617 pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
620 if ((size_t) frames * u->frame_size >= n_bytes)
623 n_bytes -= (size_t) frames * u->frame_size;
628 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
629 process_usec = u->tsched_watermark_usec;
631 if (*sleep_usec > process_usec)
632 *sleep_usec -= process_usec;
637 return work_done ? 1 : 0;
640 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
641 int work_done = false;
642 pa_usec_t max_sleep_usec = 0, process_usec = 0;
643 size_t left_to_record;
647 pa_source_assert_ref(u->source);
650 hw_sleep_time(u, &max_sleep_usec, &process_usec);
656 bool after_avail = true;
658 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
660 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
666 n_bytes = (size_t) n * u->frame_size;
667 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
672 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
675 if (PA_UNLIKELY(n_bytes <= 0)) {
679 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
680 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
681 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
682 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
692 pa_log_debug("Not filling up, because already too many iterations.");
702 snd_pcm_sframes_t frames;
705 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
707 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
709 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
710 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
712 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
714 p = pa_memblock_acquire(chunk.memblock);
715 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
716 pa_memblock_release(chunk.memblock);
718 if (PA_UNLIKELY(frames < 0)) {
719 pa_memblock_unref(chunk.memblock);
721 if (!after_avail && (int) frames == -EAGAIN)
724 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
730 if (!after_avail && frames == 0) {
731 pa_memblock_unref(chunk.memblock);
735 pa_assert(frames > 0);
739 chunk.length = (size_t) frames * u->frame_size;
741 pa_source_post(u->source, &chunk);
742 pa_memblock_unref(chunk.memblock);
746 u->read_count += frames * u->frame_size;
748 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
750 if ((size_t) frames * u->frame_size >= n_bytes)
753 n_bytes -= (size_t) frames * u->frame_size;
758 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
759 process_usec = u->tsched_watermark_usec;
761 if (*sleep_usec > process_usec)
762 *sleep_usec -= process_usec;
767 return work_done ? 1 : 0;
770 static void update_smoother(struct userdata *u) {
771 snd_pcm_sframes_t delay = 0;
774 pa_usec_t now1 = 0, now2;
775 snd_pcm_status_t *status;
776 snd_htimestamp_t htstamp = { 0, 0 };
778 snd_pcm_status_alloca(&status);
781 pa_assert(u->pcm_handle);
783 /* Let's update the time smoother */
785 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->source->sample_spec, true)) < 0)) {
786 pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
790 snd_pcm_status_get_htstamp(status, &htstamp);
791 now1 = pa_timespec_load(&htstamp);
793 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
795 now1 = pa_rtclock_now();
797 /* check if the time since the last update is bigger than the interval */
798 if (u->last_smoother_update > 0)
799 if (u->last_smoother_update + u->smoother_interval > now1)
802 position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
803 now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
805 pa_smoother_put(u->smoother, now1, now2);
807 u->last_smoother_update = now1;
808 /* exponentially increase the update interval up to the MAX limit */
809 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
812 static int64_t source_get_latency(struct userdata *u) {
814 pa_usec_t now1, now2;
818 now1 = pa_rtclock_now();
819 now2 = pa_smoother_get(u->smoother, now1);
821 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
826 static int build_pollfd(struct userdata *u) {
828 pa_assert(u->pcm_handle);
830 if (u->alsa_rtpoll_item)
831 pa_rtpoll_item_free(u->alsa_rtpoll_item);
833 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
839 /* Called from IO context */
840 static void suspend(struct userdata *u) {
842 pa_assert(u->pcm_handle);
844 pa_smoother_pause(u->smoother, pa_rtclock_now());
847 snd_pcm_close(u->pcm_handle);
848 u->pcm_handle = NULL;
850 if (u->alsa_rtpoll_item) {
851 pa_rtpoll_item_free(u->alsa_rtpoll_item);
852 u->alsa_rtpoll_item = NULL;
855 pa_log_info("Device suspended...");
858 /* Called from IO context */
859 static int update_sw_params(struct userdata *u) {
860 snd_pcm_uframes_t avail_min;
865 /* Use the full buffer if no one asked us for anything specific */
871 if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
874 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
876 b = pa_usec_to_bytes(latency, &u->source->sample_spec);
878 /* We need at least one sample in our buffer */
880 if (PA_UNLIKELY(b < u->frame_size))
883 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
886 fix_min_sleep_wakeup(u);
887 fix_tsched_watermark(u);
890 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
895 pa_usec_t sleep_usec, process_usec;
897 hw_sleep_time(u, &sleep_usec, &process_usec);
898 avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
901 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
903 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
904 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
911 /* Called from IO Context on unsuspend or from main thread when creating source */
912 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
914 u->tsched_watermark = pa_convert_size(tsched_watermark, ss, &u->source->sample_spec);
916 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
917 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
919 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
920 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
922 fix_min_sleep_wakeup(u);
923 fix_tsched_watermark(u);
926 pa_source_set_latency_range_within_thread(u->source,
928 pa_bytes_to_usec(u->hwbuf_size, ss));
930 pa_source_set_latency_range(u->source,
932 pa_bytes_to_usec(u->hwbuf_size, ss));
934 /* work-around assert in pa_source_set_latency_within_thead,
935 keep track of min_latency and reuse it when
936 this routine is called from IO context */
937 u->min_latency_ref = u->source->thread_info.min_latency;
940 pa_log_info("Time scheduling watermark is %0.2fms",
941 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
944 /* Called from IO context */
945 static int unsuspend(struct userdata *u) {
949 snd_pcm_uframes_t period_size, buffer_size;
952 pa_assert(!u->pcm_handle);
954 pa_log_info("Trying resume...");
956 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
958 SND_PCM_NO_AUTO_RESAMPLE|
959 SND_PCM_NO_AUTO_CHANNELS|
960 SND_PCM_NO_AUTO_FORMAT)) < 0) {
961 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
965 ss = u->source->sample_spec;
966 period_size = u->fragment_size / u->frame_size;
967 buffer_size = u->hwbuf_size / u->frame_size;
971 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, true)) < 0) {
972 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
976 if (b != u->use_mmap || d != u->use_tsched) {
977 pa_log_warn("Resume failed, couldn't get original access mode.");
981 if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
982 pa_log_warn("Resume failed, couldn't restore original sample settings.");
986 if (period_size*u->frame_size != u->fragment_size ||
987 buffer_size*u->frame_size != u->hwbuf_size) {
988 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
989 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
990 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
994 if (update_sw_params(u) < 0)
997 if (build_pollfd(u) < 0)
1000 /* FIXME: We need to reload the volume somehow */
1003 pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
1004 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1005 u->last_smoother_update = 0;
1009 /* reset the watermark to the value defined when source was created */
1011 reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, true);
1013 pa_log_info("Resumed successfully...");
1018 if (u->pcm_handle) {
1019 snd_pcm_close(u->pcm_handle);
1020 u->pcm_handle = NULL;
1026 /* Called from IO context */
1027 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1028 struct userdata *u = PA_SOURCE(o)->userdata;
1032 case PA_SOURCE_MESSAGE_GET_LATENCY: {
1036 r = source_get_latency(u);
1038 *((int64_t*) data) = r;
1044 return pa_source_process_msg(o, code, data, offset, chunk);
1047 /* Called from main context */
1048 static int source_set_state_in_main_thread_cb(pa_source *s, pa_source_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
1049 pa_source_state_t old_state;
1052 pa_source_assert_ref(s);
1053 pa_assert_se(u = s->userdata);
1055 old_state = pa_source_get_state(u->source);
1057 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1059 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1060 if (reserve_init(u, u->device_name) < 0)
1061 return -PA_ERR_BUSY;
1066 /* Called from the IO thread. */
1067 static int source_set_state_in_io_thread_cb(pa_source *s, pa_source_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
1071 pa_assert_se(u = s->userdata);
1073 /* It may be that only the suspend cause is changing, in which case there's
1075 if (new_state == s->thread_info.state)
1078 switch (new_state) {
1080 case PA_SOURCE_SUSPENDED: {
1081 pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1088 case PA_SOURCE_IDLE:
1089 case PA_SOURCE_RUNNING: {
1092 if (u->source->thread_info.state == PA_SOURCE_INIT) {
1093 if (build_pollfd(u) < 0)
1094 /* FIXME: This will cause an assertion failure, because
1095 * with the current design pa_source_put() is not allowed
1096 * to fail and pa_source_put() has no fallback code that
1097 * would start the source suspended if opening the device
1102 if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1103 if ((r = unsuspend(u)) < 0)
1110 case PA_SOURCE_UNLINKED:
1111 case PA_SOURCE_INIT:
1112 case PA_SOURCE_INVALID_STATE:
1119 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1120 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1123 pa_assert(u->mixer_handle);
1125 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1128 if (!PA_SOURCE_IS_LINKED(u->source->state))
1131 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1132 pa_source_set_mixer_dirty(u->source, true);
1136 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1137 pa_source_get_volume(u->source, true);
1138 pa_source_get_mute(u->source, true);
1144 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1145 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1148 pa_assert(u->mixer_handle);
1150 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1153 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1154 pa_source_set_mixer_dirty(u->source, true);
1158 if (mask & SND_CTL_EVENT_MASK_VALUE)
1159 pa_source_update_volume_and_mute(u->source);
1164 static void source_get_volume_cb(pa_source *s) {
1165 struct userdata *u = s->userdata;
1167 char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1170 pa_assert(u->mixer_path);
1171 pa_assert(u->mixer_handle);
1173 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1176 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1177 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1179 pa_log_debug("Read hardware volume: %s",
1180 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));
1182 if (pa_cvolume_equal(&u->hardware_volume, &r))
1185 s->real_volume = u->hardware_volume = r;
1187 /* Hmm, so the hardware volume changed, let's reset our software volume */
1188 if (u->mixer_path->has_dB)
1189 pa_source_set_soft_volume(s, NULL);
1192 static void source_set_volume_cb(pa_source *s) {
1193 struct userdata *u = s->userdata;
1195 char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1196 bool deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1199 pa_assert(u->mixer_path);
1200 pa_assert(u->mixer_handle);
1202 /* Shift up by the base volume */
1203 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1205 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1208 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1209 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1211 u->hardware_volume = r;
1213 if (u->mixer_path->has_dB) {
1214 pa_cvolume new_soft_volume;
1215 bool accurate_enough;
1217 /* Match exactly what the user requested by software */
1218 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1220 /* If the adjustment to do in software is only minimal we
1221 * can skip it. That saves us CPU at the expense of a bit of
1224 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1225 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1227 pa_log_debug("Requested volume: %s",
1228 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
1229 pa_log_debug("Got hardware volume: %s",
1230 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
1231 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1232 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
1233 pa_yes_no(accurate_enough));
1235 if (!accurate_enough)
1236 s->soft_volume = new_soft_volume;
1239 pa_log_debug("Wrote hardware volume: %s",
1240 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));
1242 /* We can't match exactly what the user requested, hence let's
1243 * at least tell the user about it */
1249 static void source_write_volume_cb(pa_source *s) {
1250 struct userdata *u = s->userdata;
1251 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1254 pa_assert(u->mixer_path);
1255 pa_assert(u->mixer_handle);
1256 pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1258 /* Shift up by the base volume */
1259 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1261 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
1262 pa_log_error("Writing HW volume failed");
1265 bool accurate_enough;
1267 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1268 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1270 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1272 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1273 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1275 if (!accurate_enough) {
1276 char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1278 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1279 pa_cvolume_snprint_verbose(volume_buf[0],
1280 sizeof(volume_buf[0]),
1281 &s->thread_info.current_hw_volume,
1284 pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
1289 static int source_get_mute_cb(pa_source *s, bool *mute) {
1290 struct userdata *u = s->userdata;
1293 pa_assert(u->mixer_path);
1294 pa_assert(u->mixer_handle);
1296 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, mute) < 0)
1302 static void source_set_mute_cb(pa_source *s) {
1303 struct userdata *u = s->userdata;
1306 pa_assert(u->mixer_path);
1307 pa_assert(u->mixer_handle);
1309 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
/* Wire up (or tear down) the source's volume and mute callbacks according
 * to the capabilities of the currently selected mixer path.  Called both
 * at setup time and when the active port changes.
 * NOTE(review): several `} else {` lines are elided in this extraction
 * (e.g. between 1320/1322, 1327/1329, 1338/1340, 1353/1355); the
 * pairings below are inferred from the original line numbering. */
1312 static void mixer_volume_init(struct userdata *u) {
/* No HW volume on this path: clear all volume callbacks so the core
 * falls back to software volume. */
1315 if (!u->mixer_path->has_volume) {
1316 pa_source_set_write_volume_callback(u->source, NULL);
1317 pa_source_set_get_volume_callback(u->source, NULL);
1318 pa_source_set_set_volume_callback(u->source, NULL);
1320 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
/* HW volume available: install the get/set callbacks. */
1322 pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1323 pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
/* Deferred ("write") volume needs a dB-capable path, since volume
 * changes are applied from the IO thread with timing corrections. */
1325 if (u->mixer_path->has_dB && u->deferred_volume) {
1326 pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1327 pa_log_info("Successfully enabled deferred volume.");
1329 pa_source_set_write_volume_callback(u->source, NULL);
1331 if (u->mixer_path->has_dB) {
1332 pa_source_enable_decibel_volume(u->source, true);
1333 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
/* base_volume is chosen so that PA_VOLUME_NORM maps to the path's
 * maximum dB attenuation point (0 dB becomes full volume). */
1335 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1336 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1338 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
/* No dB information: use the raw integer mixer range as the step count. */
1340 pa_source_enable_decibel_volume(u->source, false);
1341 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1343 u->source->base_volume = PA_VOLUME_NORM;
1344 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1347 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
/* Same pattern for mute: clear callbacks to fall back to soft mute,
 * otherwise install the HW mute callbacks. */
1350 if (!u->mixer_path->has_mute) {
1351 pa_source_set_get_mute_callback(u->source, NULL);
1352 pa_source_set_set_mute_callback(u->source, NULL);
1353 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1355 pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1356 pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1357 pa_log_info("Using hardware mute control.");
/* pa_source "set port" callback for UCM-managed devices: delegate the
 * port switch entirely to the UCM context (false = capture direction). */
1361 static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
1362 struct userdata *u = s->userdata;
1366 pa_assert(u->ucm_context);
1368 return pa_alsa_ucm_set_port(u->ucm_context, p, false);
/* pa_source "set port" callback for mixer-path-managed devices: activate
 * the mixer path attached to the chosen port and re-derive the volume/mute
 * callbacks for it.
 * NOTE(review): the function tail (lines 1390+) is elided here. */
1371 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1372 struct userdata *u = s->userdata;
1373 pa_alsa_port_data *data;
1377 pa_assert(u->mixer_handle);
/* Each port carries its pa_alsa_port_data payload with the path/setting. */
1379 data = PA_DEVICE_PORT_DATA(p);
/* The port's path becomes the active mixer path; it must be non-NULL. */
1381 pa_assert_se(u->mixer_path = data->path);
1382 pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
/* Re-install callbacks to match the new path's capabilities. */
1384 mixer_volume_init(u);
/* With deferred volume the pending HW write must be re-synced -- the
 * remainder of this branch is elided in this extraction. */
1388 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1389 if (s->write_volume)
/* pa_source "update requested latency" callback: recompute the software
 * parameters (watermarks, avail_min) when a client changes its latency
 * request.  Only meaningful under timer-based scheduling. */
1399 static void source_update_requested_latency_cb(pa_source *s) {
1400 struct userdata *u = s->userdata;
1402 pa_assert(u->use_tsched); /* only when timer scheduling is used
1403 * we can dynamically adjust the
1409 update_sw_params(u);
/* pa_source "reconfigure" callback: switch the device to a new sample
 * rate if (a) the hardware supports it and (b) the source is not
 * currently opened.  Only the rate is honored; format/channels in *spec
 * are ignored for now (see FIXME below).
 * NOTE(review): the loop body, `supported` check and return statements
 * (lines 1423-1428, 1430-1432, 1436+) are elided in this extraction. */
1412 static int source_reconfigure_cb(pa_source *s, pa_sample_spec *spec, bool passthrough) {
1413 struct userdata *u = s->userdata;
1415 bool supported = false;
1417 /* FIXME: we only update rate for now */
/* u->rates is a zero-terminated list filled by
 * pa_alsa_get_supported_rates() at source creation time. */
1421 for (i = 0; u->rates[i]; i++) {
1422 if (u->rates[i] == spec->rate) {
1429 pa_log_info("Source does not support sample rate of %d Hz", spec->rate);
/* Only safe to change the rate while the PCM is not in use. */
1433 if (!PA_SOURCE_IS_OPENED(s->state)) {
1434 pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, spec->rate);
1435 u->source->sample_spec.rate = spec->rate;
/* IO-thread entry point: the realtime capture loop.  Reads PCM data and
 * pushes it to the source, computes the next wakeup time (timer-based
 * scheduling) or waits on the ALSA poll descriptors (IRQ scheduling),
 * applies deferred volume changes, and recovers from poll errors.
 * NOTE(review): this extraction elides many interior lines (loop entry,
 * error labels `fail:`/`finish:`, several closing braces); comments below
 * only describe the statements actually visible. */
1442 static void thread_func(void *userdata) {
1443 struct userdata *u = userdata;
1444 unsigned short revents = 0;
1448 pa_log_debug("Thread starting up");
/* Elevate to RT priority if the daemon is configured for it. */
1450 if (u->core->realtime_scheduling)
1451 pa_make_realtime(u->core->realtime_priority);
1453 pa_thread_mq_install(&u->thread_mq);
1457 pa_usec_t rtpoll_sleep = 0, real_sleep;
1460 pa_log_debug("Loop");
1463 /* Read some data and pass it to the sources */
1464 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1466 pa_usec_t sleep_usec = 0;
1467 bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
/* (Elided condition) start the PCM and resume the rate smoother when
 * capture needs (re)starting. */
1470 pa_log_info("Starting capture.");
1471 snd_pcm_start(u->pcm_handle);
1473 pa_smoother_resume(u->smoother, pa_rtclock_now(), true);
/* mmap vs. read/write transfer, chosen at open time. */
1479 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1481 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1486 /* pa_log_debug("work_done = %i", work_done); */
1491 if (u->use_tsched) {
1494 /* OK, the capture buffer is now empty, let's
1495 * calculate when to wake up next */
1497 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1499 /* Convert from the sound card time domain to the
1500 * system time domain */
1501 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1503 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1505 /* We don't trust the conversion, so we wake up whatever comes first */
1506 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
/* Deferred volume writes may need an earlier wakeup than the capture
 * schedule; take the minimum of both deadlines. */
1510 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1511 pa_usec_t volume_sleep;
1512 pa_source_volume_change_apply(u->source, &volume_sleep);
1513 if (volume_sleep > 0) {
1514 if (rtpoll_sleep > 0)
1515 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1517 rtpoll_sleep = volume_sleep;
1521 if (rtpoll_sleep > 0) {
1522 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
/* Remember when we went to sleep so the actual delay can be measured. */
1523 real_sleep = pa_rtclock_now();
1526 pa_rtpoll_set_timer_disabled(u->rtpoll);
1528 /* Hmm, nothing to do. Let's sleep */
1529 if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
/* Measure how long we actually slept vs. what we asked for. */
1532 if (rtpoll_sleep > 0) {
1533 real_sleep = pa_rtclock_now() - real_sleep;
1535 pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
1536 (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
1537 (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
/* Oversleeping past the watermark indicates scheduling pressure that
 * will show up as extra latency -- warn the user. */
1539 if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
1540 pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
1541 (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
1542 (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
1545 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
1546 pa_source_volume_change_apply(u->source, NULL);
1551 /* Tell ALSA about this and process its response */
1552 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1553 struct pollfd *pollfd;
1557 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1559 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1560 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
/* Anything besides POLLIN (POLLERR etc.) means the PCM needs recovery. */
1564 if (revents & ~POLLIN) {
1565 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1570 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1571 pa_log_debug("Wakeup from ALSA!");
1578 /* If this was no regular exit from the loop we have to continue
1579 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1580 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1581 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1584 pa_log_debug("Thread shutting down");
/* Choose the source's registered name with the following precedence:
 *   1. modarg "source_name" (fixed name, registration must not fail over)
 *   2. modarg "name" (legacy alias; also pins namereg_fail)
 *   3. synthesized "alsa_input.<dev>[.<mapping>]" from device id/name
 * NOTE(review): several lines (returns, intermediate branches, the
 * pa_xfree of `t`) are elided in this extraction. */
1587 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1593 pa_assert(device_name);
1595 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1596 pa_source_new_data_set_name(data, n);
/* Explicit user-chosen names must not be silently renamed on collision. */
1597 data->namereg_fail = true;
1601 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1602 data->namereg_fail = true;
/* Fallback: derive the name from the device; collisions are tolerated. */
1604 n = device_id ? device_id : device_name;
1605 data->namereg_fail = false;
1609 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1611 t = pa_sprintf_malloc("alsa_input.%s", n);
1613 pa_source_new_data_set_name(data, t);
/* Locate a mixer device matching the opened PCM and probe/synthesize a
 * mixer path (from an explicit `element` modarg) or take the mapping's
 * pre-built input path set.  On any probe failure, fall through to the
 * cleanup code that frees the partially initialized path and closes the
 * mixer handle, leaving u->mixer_* NULL (software volume fallback).
 * NOTE(review): the `fail:` label, intervening `goto fail` targets and
 * early `return`s are elided in this extraction. */
1617 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
/* Nothing to look for without a mapping or an explicit element. */
1618 if (!mapping && !element)
1621 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1622 pa_log_info("Failed to find a working mixer device.");
/* Explicit element given: build a single synthetic input path for it. */
1628 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1631 if (pa_alsa_path_probe(u->mixer_path, NULL, u->mixer_handle, ignore_dB) < 0)
1634 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1635 pa_alsa_path_dump(u->mixer_path);
/* Otherwise use the mapping's probed input path set, if it has one. */
1636 } else if (!(u->mixer_path_set = mapping->input_path_set))
/* Cleanup (reached via elided `fail:` label): release whatever was set. */
1643 if (u->mixer_path) {
1644 pa_alsa_path_free(u->mixer_path);
1645 u->mixer_path = NULL;
1648 if (u->mixer_handle) {
1649 snd_mixer_close(u->mixer_handle);
1650 u->mixer_handle = NULL;
/* Activate the mixer path chosen by the active port (or the only/first
 * available one), install volume/mute callbacks, and register mixer event
 * callbacks so external volume changes are picked up.  Returns 0 also in
 * the no-mixer case (software volume).
 * NOTE(review): several returns, `} else {` lines and closing braces are
 * elided in this extraction; control-flow comments below are inferred
 * from the original line numbering. */
1654 static int setup_mixer(struct userdata *u, bool need_mixer_callback_dummy) {
1655 bool need_mixer_callback = false;
/* No mixer found by find_mixer(): nothing to set up. */
1659 if (!u->mixer_handle)
1662 if (u->source->active_port) {
1663 pa_alsa_port_data *data;
1665 /* We have a list of supported paths, so let's activate the
1666 * one that has been chosen as active */
1668 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1669 u->mixer_path = data->path;
1671 pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);
/* No active port: fall back to the first path of the set, if any. */
1675 if (!u->mixer_path && u->mixer_path_set)
1676 u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1678 if (u->mixer_path) {
1679 /* Hmm, we have only a single path, then let's activate it */
1681 pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
1686 mixer_volume_init(u);
1688 /* Will we need to register callbacks? */
1689 if (u->mixer_path_set && u->mixer_path_set->paths) {
1693 PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1694 if (p->has_volume || p->has_mute)
1695 need_mixer_callback = true;
1698 else if (u->mixer_path)
1699 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1701 if (need_mixer_callback) {
1702 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
/* Deferred volume: poll the mixer from the IO thread via rtpoll;
 * otherwise watch it from the main loop via an fd list. */
1703 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1704 u->mixer_pd = pa_alsa_mixer_pdata_new();
1705 mixer_callback = io_mixer_callback;
1707 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1708 pa_log("Failed to initialize file descriptor monitoring");
1712 u->mixer_fdl = pa_alsa_fdlist_new();
1713 mixer_callback = ctl_mixer_callback;
1715 if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1716 pa_log("Failed to initialize file descriptor monitoring");
1721 if (u->mixer_path_set)
1722 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1724 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
/* Module entry point for creating an ALSA capture source.
 * Parses modargs, opens the PCM device (via UCM mapping, auto device-id
 * probing, or an explicit device string), sets up the mixer, creates the
 * pa_source object, starts the IO thread and applies the initial
 * volume/mute state.  Returns the new pa_source, or NULL on failure.
 * NOTE(review): this extraction elides many interior lines (declarations
 * of ss/map/frame_size, `goto fail` statements, the `fail:` cleanup label
 * and closing braces); comments only describe the visible statements. */
1730 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1732 struct userdata *u = NULL;
1733 const char *dev_id = NULL, *key, *mod_name;
1735 char *thread_name = NULL;
1736 uint32_t alternate_sample_rate;
1738 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1739 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1741 bool use_mmap = true, b, use_tsched = true, d, ignore_dB = false, namereg_fail = false, deferred_volume = false, fixed_latency_range = false;
1742 pa_source_new_data data;
1745 pa_alsa_profile_set *profile_set = NULL;
/* Start from the daemon-wide defaults, then layer on mapping overrides. */
1751 ss = m->core->default_sample_spec;
1752 map = m->core->default_channel_map;
1754 /* Pick sample spec overrides from the mapping, if any */
1756 if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
1757 ss.format = mapping->sample_spec.format;
1758 if (mapping->sample_spec.rate != 0)
1759 ss.rate = mapping->sample_spec.rate;
1760 if (mapping->sample_spec.channels != 0) {
1761 ss.channels = mapping->sample_spec.channels;
1762 if (pa_channel_map_valid(&mapping->channel_map))
1763 pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
1767 /* Override with modargs if provided */
1768 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1769 pa_log("Failed to parse sample specification and channel map");
1773 alternate_sample_rate = m->core->alternate_sample_rate;
1774 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1775 pa_log("Failed to parse alternate sample rate");
1779 frame_size = pa_frame_size(&ss);
/* Default buffer metrics: classic nfrags*frag_size for IRQ scheduling,
 * a large tsched buffer plus watermark for timer scheduling. */
1781 nfrags = m->core->default_n_fragments;
1782 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1784 frag_size = (uint32_t) frame_size;
1785 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1786 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1788 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1789 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1790 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1791 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1792 pa_log("Failed to parse buffer metrics");
1796 buffer_size = nfrags * frag_size;
/* Convert byte sizes to ALSA frame counts. */
1798 period_frames = frag_size/frame_size;
1799 buffer_frames = buffer_size/frame_size;
1800 tsched_frames = tsched_size/frame_size;
1802 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1803 pa_log("Failed to parse mmap argument.");
1807 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1808 pa_log("Failed to parse tsched argument.");
1812 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1813 pa_log("Failed to parse ignore_dB argument.");
1817 deferred_volume = m->core->deferred_volume;
1818 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1819 pa_log("Failed to parse deferred_volume argument.");
1823 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
1824 pa_log("Failed to parse fixed_latency_range argument.");
/* tsched may be globally vetoed (e.g. no high-resolution timers). */
1828 use_tsched = pa_alsa_may_tsched(use_tsched);
1830 u = pa_xnew0(struct userdata, 1);
1833 u->use_mmap = use_mmap;
1834 u->use_tsched = use_tsched;
1835 u->deferred_volume = deferred_volume;
1836 u->fixed_latency_range = fixed_latency_range;
1838 u->rtpoll = pa_rtpoll_new();
1840 if (pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll) < 0) {
1841 pa_log("pa_thread_mq_init() failed.");
/* Smoother: tracks the sound-card clock against the system clock. */
1845 u->smoother = pa_smoother_new(
1846 SMOOTHER_ADJUST_USEC,
1847 SMOOTHER_WINDOW_USEC,
1853 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1856 if (mapping && mapping->ucm_context.ucm)
1857 u->ucm_context = &mapping->ucm_context;
1859 dev_id = pa_modargs_get_value(
1861 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1863 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
/* Device reservation (e.g. via D-Bus) so other apps know it's busy. */
1865 if (reserve_init(u, dev_id) < 0)
1868 if (reserve_monitor_init(u, dev_id) < 0)
1874 /* Force ALSA to reread its configuration if module-alsa-card didn't
1875 * do it for us. This matters if our device was hot-plugged after ALSA
1876 * has already read its configuration - see
1877 * https://bugs.freedesktop.org/show_bug.cgi?id=54029
1881 snd_config_update_free_global();
/* Open path 1 (elided condition, presumably UCM/mapping): open by
 * device id + mapping. */
1885 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1886 pa_log("device_id= not set");
1890 if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
1891 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
1892 pa_log("Failed to enable ucm modifier %s", mod_name);
1894 pa_log_debug("Enabled ucm modifier %s", mod_name);
1897 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1901 SND_PCM_STREAM_CAPTURE,
1902 &period_frames, &buffer_frames, tsched_frames,
/* Open path 2: probe profiles automatically for a bare device_id. */
1906 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1908 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1911 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1915 SND_PCM_STREAM_CAPTURE,
1916 &period_frames, &buffer_frames, tsched_frames,
1917 &b, &d, profile_set, &mapping)))
/* Open path 3: plain ALSA device string ("default" if unset). */
1922 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1923 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1926 SND_PCM_STREAM_CAPTURE,
1927 &period_frames, &buffer_frames, tsched_frames,
1932 pa_assert(u->device_name);
1933 pa_log_info("Successfully opened device %s.", u->device_name);
1935 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1936 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1941 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
/* `b` = mmap supported, `d` = tsched usable, as reported by the open
 * helpers above; downgrade our mode flags accordingly. */
1943 if (use_mmap && !b) {
1944 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1945 u->use_mmap = use_mmap = false;
1948 if (use_tsched && (!b || !d)) {
1949 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1950 u->use_tsched = use_tsched = false;
1954 pa_log_info("Successfully enabled mmap() mode.");
1956 if (u->use_tsched) {
1957 pa_log_info("Successfully enabled timer-based scheduling mode.");
1958 if (u->fixed_latency_range)
1959 pa_log_info("Disabling latency range changes on overrun");
1962 u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
1964 pa_log_error("Failed to find any supported sample rates.");
1968 /* ALSA might tweak the sample spec, so recalculate the frame size */
1969 frame_size = pa_frame_size(&ss);
/* UCM manages its own mixer; otherwise probe one ourselves. */
1971 if (!u->ucm_context)
1972 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1974 pa_source_new_data_init(&data);
1975 data.driver = driver;
1978 set_source_name(&data, ma, dev_id, u->device_name, mapping);
1980 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1981 * variable instead of using &data.namereg_fail directly, because
1982 * data.namereg_fail is a bitfield and taking the address of a bitfield
1983 * variable is impossible. */
1984 namereg_fail = data.namereg_fail;
1985 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1986 pa_log("Failed to parse namereg_fail argument.");
1987 pa_source_new_data_done(&data);
1990 data.namereg_fail = namereg_fail;
1992 pa_source_new_data_set_sample_spec(&data, &ss);
1993 pa_source_new_data_set_channel_map(&data, &map);
1994 pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
/* Populate the property list describing the device for clients. */
1996 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1997 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1998 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1999 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2000 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2003 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2004 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2006 while ((key = pa_proplist_iterate(mapping->proplist, &state)))
2007 pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
2010 pa_alsa_init_description(data.proplist, card);
2012 if (u->control_device)
2013 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2015 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2016 pa_log("Invalid properties");
2017 pa_source_new_data_done(&data);
/* Ports come either from UCM or from the probed mixer path set. */
2022 pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, false, card);
2023 else if (u->mixer_path_set)
2024 pa_alsa_add_ports(&data, u->mixer_path_set, card);
2026 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
/* Remember whether the user supplied initial volume/mute before the
 * new_data structure is torn down. */
2027 volume_is_set = data.volume_is_set;
2028 mute_is_set = data.muted_is_set;
2029 pa_source_new_data_done(&data);
2032 pa_log("Failed to create source object");
2036 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2037 &u->source->thread_info.volume_change_safety_margin) < 0) {
2038 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2042 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2043 &u->source->thread_info.volume_change_extra_delay) < 0) {
2044 pa_log("Failed to parse deferred_volume_extra_delay parameter");
/* Hook up the source's vtable. */
2048 u->source->parent.process_msg = source_process_msg;
2050 u->source->update_requested_latency = source_update_requested_latency_cb;
2051 u->source->set_state_in_main_thread = source_set_state_in_main_thread_cb;
2052 u->source->set_state_in_io_thread = source_set_state_in_io_thread_cb;
2054 u->source->set_port = source_set_port_ucm_cb;
2056 u->source->set_port = source_set_port_cb;
2057 if (u->source->alternate_sample_rate)
2058 u->source->reconfigure = source_reconfigure_cb;
2059 u->source->userdata = u;
2061 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
2062 pa_source_set_rtpoll(u->source, u->rtpoll);
2064 u->frame_size = frame_size;
2065 u->frames_per_block = pa_mempool_block_size_max(m->core->mempool) / frame_size;
2066 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2067 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2068 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
2070 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2071 (double) u->hwbuf_size / (double) u->fragment_size,
2072 (long unsigned) u->fragment_size,
2073 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2074 (long unsigned) u->hwbuf_size,
2075 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2077 if (u->use_tsched) {
2078 u->tsched_watermark_ref = tsched_watermark;
2079 reset_watermark(u, u->tsched_watermark_ref, &ss, false);
/* Without tsched, latency is fixed at the full hardware buffer. */
2082 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
2086 if (update_sw_params(u) < 0)
2089 if (u->ucm_context) {
2090 if (u->source->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, false) < 0)
2092 } else if (setup_mixer(u, ignore_dB) < 0)
2095 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2097 thread_name = pa_sprintf_malloc("alsa-source-%s", pa_strnull(pa_proplist_gets(u->source->proplist, "alsa.id")));
2098 if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
2099 pa_log("Failed to create thread.");
2102 pa_xfree(thread_name);
2105 /* Get initial mixer settings */
2106 if (volume_is_set) {
2107 if (u->source->set_volume)
2108 u->source->set_volume(u->source);
2110 if (u->source->get_volume)
2111 u->source->get_volume(u->source);
2115 if (u->source->set_mute)
2116 u->source->set_mute(u->source);
2118 if (u->source->get_mute) {
2121 if (u->source->get_mute(u->source, &mute) >= 0)
2122 pa_source_set_mute(u->source, mute, false);
2126 if ((volume_is_set || mute_is_set) && u->source->write_volume)
2127 u->source->write_volume(u->source);
2129 pa_source_put(u->source);
2132 pa_alsa_profile_set_free(profile_set);
/* Failure path (reached via elided `fail:` label): release temporaries. */
2137 pa_xfree(thread_name);
2143 pa_alsa_profile_set_free(profile_set);
/* Tear down everything pa_alsa_source_new() built, in reverse order:
 * unlink the source, stop the IO thread, then free mixer, rtpoll, PCM
 * and string resources.  Safe to call on a partially initialized
 * userdata (hence the NULL checks).
 * NOTE(review): several `if (u->...)` guard lines and closing braces are
 * elided in this extraction. */
2148 static void userdata_free(struct userdata *u) {
/* Detach the source from the core before shutting its thread down. */
2152 pa_source_unlink(u->source);
/* Synchronously stop the IO thread via a SHUTDOWN message. */
2155 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2156 pa_thread_free(u->thread);
2159 pa_thread_mq_done(&u->thread_mq);
2162 pa_source_unref(u->source);
2165 pa_alsa_mixer_pdata_free(u->mixer_pd);
2167 if (u->alsa_rtpoll_item)
2168 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2171 pa_rtpoll_free(u->rtpoll);
/* Drop pending capture data before closing the PCM. */
2173 if (u->pcm_handle) {
2174 snd_pcm_drop(u->pcm_handle);
2175 snd_pcm_close(u->pcm_handle);
2179 pa_alsa_fdlist_free(u->mixer_fdl);
/* Paths owned by a path set are freed with the set, not individually. */
2181 if (u->mixer_path && !u->mixer_path_set)
2182 pa_alsa_path_free(u->mixer_path);
2184 if (u->mixer_handle)
2185 snd_mixer_close(u->mixer_handle);
2188 pa_smoother_free(u->smoother);
2196 pa_xfree(u->device_name);
2197 pa_xfree(u->control_device);
2198 pa_xfree(u->paths_dir);
2202 void pa_alsa_source_free(pa_source *s) {
2205 pa_source_assert_ref(s);
2206 pa_assert_se(u = s->userdata);