2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
28 #include <asoundlib.h>
30 #include <pulse/rtclock.h>
31 #include <pulse/timeval.h>
32 #include <pulse/volume.h>
33 #include <pulse/xmalloc.h>
35 #include <pulsecore/core.h>
36 #include <pulsecore/i18n.h>
37 #include <pulsecore/module.h>
38 #include <pulsecore/memchunk.h>
39 #include <pulsecore/sink.h>
40 #include <pulsecore/modargs.h>
41 #include <pulsecore/core-rtclock.h>
42 #include <pulsecore/core-util.h>
43 #include <pulsecore/sample-util.h>
44 #include <pulsecore/log.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/thread.h>
47 #include <pulsecore/thread-mq.h>
48 #include <pulsecore/rtpoll.h>
49 #include <pulsecore/time-smoother.h>
51 #include <modules/reserve-wrap.h>
53 #include "alsa-util.h"
54 #include "alsa-source.h"
56 /* #define DEBUG_TIMING */
58 #define DEFAULT_DEVICE "default"
60 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
61 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
63 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
64 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
65 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
66 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
67 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
68 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
70 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
71 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
73 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
74 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
76 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
77 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
79 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
87 pa_thread_mq thread_mq;
90 snd_pcm_t *pcm_handle;
93 pa_alsa_fdlist *mixer_fdl;
94 pa_alsa_mixer_pdata *mixer_pd;
95 snd_mixer_t *mixer_handle;
96 pa_alsa_path_set *mixer_path_set;
97 pa_alsa_path *mixer_path;
99 pa_cvolume hardware_volume;
108 tsched_watermark_ref,
114 watermark_inc_threshold,
115 watermark_dec_threshold;
117 snd_pcm_uframes_t frames_per_block;
119 pa_usec_t watermark_dec_not_before;
120 pa_usec_t min_latency_ref;
121 pa_usec_t tsched_watermark_usec;
123 char *device_name; /* name of the PCM device */
124 char *control_device; /* name of the control device */
126 bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
130 pa_rtpoll_item *alsa_rtpoll_item;
132 pa_smoother *smoother;
134 pa_usec_t smoother_interval;
135 pa_usec_t last_smoother_update;
137 pa_reserve_wrapper *reserve;
138 pa_hook_slot *reserve_slot;
139 pa_reserve_monitor_wrapper *monitor;
140 pa_hook_slot *monitor_slot;
143 pa_alsa_ucm_mapping_context *ucm_context;
146 static void userdata_free(struct userdata *u);
/* Reservation hook: another application asked us (via the device-reservation
 * protocol) to release the ALSA device, so forcibly suspend the source.
 * Returns PA_HOOK_CANCEL when suspending fails.
 * NOTE(review): this chunk is an elided extraction -- the asserts, the
 * success return (presumably PA_HOOK_OK) and the closing brace are missing
 * from this view. */
148 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
152 pa_log_debug("Suspending source %s, because another application requested us to release the device.", u->source->name);
/* force=true: suspend regardless of idleness, tagged as an application-driven suspend */
154 if (pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION) < 0)
155 return PA_HOOK_CANCEL;
/* Disconnect the reservation hook slot and drop our reference on the
 * reserve wrapper.
 * NOTE(review): elided extraction -- the guard around the unref (likely
 * `if (u->reserve)`) and the trailing `u->reserve = NULL; }` are not
 * visible here. */
160 static void reserve_done(struct userdata *u) {
163 if (u->reserve_slot) {
164 pa_hook_slot_free(u->reserve_slot);
165 u->reserve_slot = NULL;
169 pa_reserve_wrapper_unref(u->reserve);
/* Push the source's human-readable description to the reservation service
 * so other applications can display which device holder they are talking to.
 * No-op when either the source or the reserve wrapper is absent.
 * NOTE(review): elided extraction -- the bare `return;` after the guard and
 * the closing brace are missing from this view. */
174 static void reserve_update(struct userdata *u) {
175 const char *description;
178 if (!u->source || !u->reserve)
181 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
182 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
/* Acquire the device-reservation wrapper for the ALSA device `dname` and
 * connect reserve_cb() to its hook.  Skipped entirely in system mode.
 * NOTE(review): elided extraction -- the `rname` declaration, its free,
 * the early-return paths and the closing brace/return value are missing
 * from this view. */
185 static int reserve_init(struct userdata *u, const char *dname) {
/* Device reservation is a per-user (D-Bus session) mechanism; system mode skips it. */
194 if (pa_in_system_mode())
197 if (!(rname = pa_alsa_get_reserve_name(dname)))
200 /* We are resuming, try to lock the device */
201 u->reserve = pa_reserve_wrapper_get(u->core, rname);
209 pa_assert(!u->reserve_slot);
210 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
/* Reservation-monitor hook: suspend the source while another application
 * holds the device (and we hold no reservation ourselves), resume it once
 * the device becomes free again.
 * NOTE(review): elided extraction -- the `} else {` between the two
 * branches, the PA_HOOK_OK return and the closing brace are missing from
 * this view. */
215 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
219 if (PA_PTR_TO_UINT(busy) && !u->reserve) {
220 pa_log_debug("Suspending source %s, because another application is blocking the access to the device.", u->source->name);
221 pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION);
223 pa_log_debug("Resuming source %s, because other applications aren't blocking access to the device any more.", u->source->name);
224 pa_source_suspend(u->source, false, PA_SUSPEND_APPLICATION);
/* Disconnect the monitor hook slot and drop our reference on the
 * reservation-monitor wrapper.  Mirrors reserve_done().
 * NOTE(review): elided extraction -- the guard around the unref and the
 * trailing `u->monitor = NULL; }` are not visible here. */
230 static void monitor_done(struct userdata *u) {
233 if (u->monitor_slot) {
234 pa_hook_slot_free(u->monitor_slot);
235 u->monitor_slot = NULL;
239 pa_reserve_monitor_wrapper_unref(u->monitor);
/* Set up passive monitoring of the device reservation for `dname` and
 * connect monitor_cb() to its hook.  Mirrors reserve_init(); skipped in
 * system mode.
 * NOTE(review): elided extraction -- the `rname` declaration/free, error
 * paths, return value and closing brace are missing from this view. */
244 static int reserve_monitor_init(struct userdata *u, const char *dname) {
250 if (pa_in_system_mode())
253 if (!(rname = pa_alsa_get_reserve_name(dname)))
256 /* We are resuming, try to lock the device */
257 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
263 pa_assert(!u->monitor_slot);
264 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
/* Recompute u->min_sleep and u->min_wakeup (in bytes) for timer-based
 * scheduling: start from the TSCHED_MIN_* time constants and clamp each to
 * [one frame, half of the usable hw buffer], frame-aligned.
 * NOTE(review): elided extraction -- `pa_assert(u)` and the closing brace
 * are missing from this view. */
269 static void fix_min_sleep_wakeup(struct userdata *u) {
270 size_t max_use, max_use_2;
273 pa_assert(u->use_tsched);
/* Usable buffer = full hw buffer minus the part we deliberately leave unused. */
275 max_use = u->hwbuf_size - u->hwbuf_unused;
276 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
278 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
279 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
281 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
282 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
/* Clamp the timer-scheduling watermark into the valid window
 * [min_wakeup, usable-buffer - min_sleep] and refresh its cached usec
 * value.
 * NOTE(review): elided extraction -- the `size_t max_use;` declaration,
 * `pa_assert(u)` and the closing brace are missing from this view. */
285 static void fix_tsched_watermark(struct userdata *u) {
288 pa_assert(u->use_tsched);
290 max_use = u->hwbuf_size - u->hwbuf_unused;
292 if (u->tsched_watermark > max_use - u->min_sleep)
293 u->tsched_watermark = max_use - u->min_sleep;
295 if (u->tsched_watermark < u->min_wakeup)
296 u->tsched_watermark = u->min_wakeup;
/* Cache the usec form so hot paths need not convert repeatedly. */
298 u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
/* React to an overrun/deadline miss: first try to grow the wakeup
 * watermark (doubling, capped by watermark_inc_step); if the watermark is
 * already maxed out, grow the source's minimal latency instead -- unless a
 * fixed latency range was configured.
 * NOTE(review): elided extraction -- the early `return`s after each
 * successful step, part of the comment around line 320, and the closing
 * brace are missing from this view. */
301 static void increase_watermark(struct userdata *u) {
302 size_t old_watermark;
303 pa_usec_t old_min_latency, new_min_latency;
306 pa_assert(u->use_tsched);
308 /* First, just try to increase the watermark */
309 old_watermark = u->tsched_watermark;
310 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
311 fix_tsched_watermark(u);
/* If fix_tsched_watermark() accepted a larger value, we are done. */
313 if (old_watermark != u->tsched_watermark) {
314 pa_log_info("Increasing wakeup watermark to %0.2f ms",
315 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
319 /* Hmm, we cannot increase the watermark any further, hence let's
320 raise the latency unless doing so was disabled in
322 if (u->fixed_latency_range)
325 old_min_latency = u->source->thread_info.min_latency;
326 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
/* Never exceed the configured maximum latency. */
327 new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
329 if (old_min_latency != new_min_latency) {
330 pa_log_info("Increasing minimal latency to %0.2f ms",
331 (double) new_min_latency / PA_USEC_PER_MSEC);
333 pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
336 /* When we reach this we're officially fucked! */
/* Opportunistically shrink the wakeup watermark when we have consistently
 * had plenty of headroom.  Rate-limited: only acts once the
 * watermark_dec_not_before deadline has passed, and re-arms that deadline
 * (TSCHED_WATERMARK_VERIFY_AFTER_USEC) afterwards.
 * NOTE(review): elided extraction -- the `pa_usec_t now;` declaration, the
 * early `return`s after the two guards, the `else` between lines 357/359
 * and the closing brace are missing from this view. */
339 static void decrease_watermark(struct userdata *u) {
340 size_t old_watermark;
344 pa_assert(u->use_tsched);
346 now = pa_rtclock_now();
/* 0 means "not armed yet": wait one verification period before shrinking. */
348 if (u->watermark_dec_not_before <= 0)
351 if (u->watermark_dec_not_before > now)
354 old_watermark = u->tsched_watermark;
/* Halve when a full step would underflow, otherwise step down (at most halving). */
356 if (u->tsched_watermark < u->watermark_dec_step)
357 u->tsched_watermark = u->tsched_watermark / 2;
359 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
361 fix_tsched_watermark(u);
363 if (old_watermark != u->tsched_watermark)
364 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
365 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
367 /* We don't change the latency range*/
370 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
/* Compute how long the IO thread may sleep (*sleep_usec) and how much time
 * to reserve for processing (*process_usec) from the requested latency and
 * the current watermark.  Falls back to the full hw buffer time when no
 * specific latency was requested.
 * NOTE(review): elided extraction -- the `pa_usec_t usec, wm;`
 * declarations, the clamping of wm against usec, the assignment of
 * *process_usec (presumably = wm), the DEBUG_TIMING #ifdef around the log
 * and the closing brace are missing from this view. */
373 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
376 pa_assert(sleep_usec);
377 pa_assert(process_usec);
380 pa_assert(u->use_tsched);
382 usec = pa_source_get_requested_latency_within_thread(u->source);
/* (pa_usec_t) -1 is the "no latency requested" sentinel. */
384 if (usec == (pa_usec_t) -1)
385 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
387 wm = u->tsched_watermark_usec;
392 *sleep_usec = usec - wm;
396 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
397 (unsigned long) (usec / PA_USEC_PER_MSEC),
398 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
399 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
/* Attempt to recover the PCM from an error returned by `call` (e.g. -EPIPE
 * overrun, -ESTRPIPE after system suspend) via snd_pcm_recover(); logs and
 * propagates failure.
 * NOTE(review): elided extraction -- the asserts on u/call, the -EPIPE
 * check guarding the "Buffer overrun!" log, the `return -1;`/`return 0;`
 * paths and the closing brace are missing from this view. */
403 static int try_recover(struct userdata *u, const char *call, int err) {
408 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
/* -EAGAIN must be handled by the caller, never passed in here. */
410 pa_assert(err != -EAGAIN);
413 pa_log_debug("%s: Buffer overrun!", call);
415 if (err == -ESTRPIPE)
416 pa_log_debug("%s: System suspended!", call);
/* Third arg 1 = silent: snd_pcm_recover() does not print its own errors. */
418 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
419 pa_log("%s: %s", call, pa_alsa_strerror(err));
/* Given `n_bytes` currently available to read, compute how much space is
 * left before the hw buffer overruns, detect overruns, and adapt the
 * watermark: grow it on (near-)overrun, shrink it when there is ample
 * headroom and we were woken by a timeout (`on_timeout`).
 * NOTE(review): elided extraction -- the overrun branch body
 * (left_to_record = 0, overrun = true), the DEBUG_TIMING #ifdefs, the
 * use_tsched guard around the adaptation block, the on_timeout check before
 * decrease_watermark() and the closing braces are missing from this view. */
427 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, bool on_timeout) {
428 size_t left_to_record;
429 size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
430 bool overrun = false;
432 /* We use <= instead of < for this check here because an overrun
433 * only happens after the last sample was processed, not already when
434 * it is removed from the buffer. This is particularly important
435 * when block transfer is used. */
437 if (n_bytes <= rec_space)
438 left_to_record = rec_space - n_bytes;
441 /* We got a dropout. What a mess! */
449 if (pa_log_ratelimit(PA_LOG_INFO))
450 pa_log_info("Overrun!");
454 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
458 bool reset_not_before = true;
460 if (overrun || left_to_record < u->watermark_inc_threshold)
461 increase_watermark(u);
462 else if (left_to_record > u->watermark_dec_threshold) {
463 reset_not_before = false;
465 /* We decrease the watermark only if have actually
466 * been woken up by a timeout. If something else woke
467 * us up it's too easy to fulfill the deadlines... */
470 decrease_watermark(u);
473 if (reset_not_before)
474 u->watermark_dec_not_before = 0;
477 return left_to_record;
480 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
481 bool work_done = false;
482 pa_usec_t max_sleep_usec = 0, process_usec = 0;
483 size_t left_to_record;
487 pa_source_assert_ref(u->source);
490 hw_sleep_time(u, &max_sleep_usec, &process_usec);
496 bool after_avail = true;
498 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
500 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
506 n_bytes = (size_t) n * u->frame_size;
509 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
512 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
517 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
519 pa_log_debug("Not reading, because too early.");
524 if (PA_UNLIKELY(n_bytes <= 0)) {
528 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
529 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read.\n"
530 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
531 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
537 pa_log_debug("Not reading, because not necessary.");
544 pa_log_debug("Not filling up, because already too many iterations.");
553 pa_log_debug("Reading");
560 const snd_pcm_channel_area_t *areas;
561 snd_pcm_uframes_t offset, frames;
562 snd_pcm_sframes_t sframes;
564 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
565 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
567 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
569 if (!after_avail && err == -EAGAIN)
572 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
578 /* Make sure that if these memblocks need to be copied they will fit into one slot */
579 frames = PA_MIN(frames, u->frames_per_block);
581 if (!after_avail && frames == 0)
584 pa_assert(frames > 0);
587 /* Check these are multiples of 8 bit */
588 pa_assert((areas[0].first & 7) == 0);
589 pa_assert((areas[0].step & 7) == 0);
591 /* We assume a single interleaved memory buffer */
592 pa_assert((areas[0].first >> 3) == 0);
593 pa_assert((areas[0].step >> 3) == u->frame_size);
595 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
597 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, true);
598 chunk.length = pa_memblock_get_length(chunk.memblock);
601 pa_source_post(u->source, &chunk);
602 pa_memblock_unref_fixed(chunk.memblock);
604 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
606 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
614 u->read_count += frames * u->frame_size;
617 pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
620 if ((size_t) frames * u->frame_size >= n_bytes)
623 n_bytes -= (size_t) frames * u->frame_size;
628 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
629 process_usec = u->tsched_watermark_usec;
631 if (*sleep_usec > process_usec)
632 *sleep_usec -= process_usec;
637 return work_done ? 1 : 0;
640 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
641 int work_done = false;
642 pa_usec_t max_sleep_usec = 0, process_usec = 0;
643 size_t left_to_record;
647 pa_source_assert_ref(u->source);
650 hw_sleep_time(u, &max_sleep_usec, &process_usec);
656 bool after_avail = true;
658 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
660 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
666 n_bytes = (size_t) n * u->frame_size;
667 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
672 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
675 if (PA_UNLIKELY(n_bytes <= 0)) {
679 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
680 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
681 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
682 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
692 pa_log_debug("Not filling up, because already too many iterations.");
702 snd_pcm_sframes_t frames;
705 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
707 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
709 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
710 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
712 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
714 p = pa_memblock_acquire(chunk.memblock);
715 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
716 pa_memblock_release(chunk.memblock);
718 if (PA_UNLIKELY(frames < 0)) {
719 pa_memblock_unref(chunk.memblock);
721 if (!after_avail && (int) frames == -EAGAIN)
724 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
730 if (!after_avail && frames == 0) {
731 pa_memblock_unref(chunk.memblock);
735 pa_assert(frames > 0);
739 chunk.length = (size_t) frames * u->frame_size;
741 pa_source_post(u->source, &chunk);
742 pa_memblock_unref(chunk.memblock);
746 u->read_count += frames * u->frame_size;
748 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
750 if ((size_t) frames * u->frame_size >= n_bytes)
753 n_bytes -= (size_t) frames * u->frame_size;
758 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
759 process_usec = u->tsched_watermark_usec;
761 if (*sleep_usec > process_usec)
762 *sleep_usec -= process_usec;
767 return work_done ? 1 : 0;
/* Feed a fresh (system time, stream time) sample into the time smoother
 * used for latency interpolation.  Uses the ALSA status htimestamp when
 * available, falls back to the rtclock; rate-limited by
 * smoother_interval, which backs off exponentially up to
 * SMOOTHER_MAX_INTERVAL.
 * NOTE(review): elided extraction -- the `int err; uint64_t position;`
 * declarations, `pa_assert(u)`, the early `return`s (on delay failure, on
 * htstamp==0 check, on rate-limit) and the closing brace are missing from
 * this view. */
770 static void update_smoother(struct userdata *u) {
771 snd_pcm_sframes_t delay = 0;
774 pa_usec_t now1 = 0, now2;
775 snd_pcm_status_t *status;
776 snd_htimestamp_t htstamp = { 0, 0 };
/* alloca'd status object -- no free needed. */
778 snd_pcm_status_alloca(&status);
781 pa_assert(u->pcm_handle);
783 /* Let's update the time smoother */
785 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->source->sample_spec, true)) < 0)) {
786 pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
790 snd_pcm_status_get_htstamp(status, &htstamp);
791 now1 = pa_timespec_load(&htstamp);
793 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
795 now1 = pa_rtclock_now();
797 /* check if the time since the last update is bigger than the interval */
798 if (u->last_smoother_update > 0)
799 if (u->last_smoother_update + u->smoother_interval > now1)
/* Stream position = bytes read so far plus what is still sitting in the hw buffer. */
802 position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
803 now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
805 pa_smoother_put(u->smoother, now1, now2);
807 u->last_smoother_update = now1;
808 /* exponentially increase the update interval up to the MAX limit */
809 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
/* Return the current capture latency in usec (signed): interpolated stream
 * time minus the time corresponding to what we have already posted
 * (read_count).
 * NOTE(review): elided extraction -- the `int64_t delay;` declaration,
 * `pa_assert(u)`, the `return delay;` and the closing brace are missing
 * from this view. */
812 static int64_t source_get_latency(struct userdata *u) {
814 pa_usec_t now1, now2;
818 now1 = pa_rtclock_now();
819 now2 = pa_smoother_get(u->smoother, now1);
821 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
/* (Re)create the rtpoll item carrying the PCM's poll descriptors, freeing
 * any previous one first.
 * NOTE(review): elided extraction -- `pa_assert(u)`, the error/success
 * `return` paths and the closing brace are missing from this view. */
826 static int build_pollfd(struct userdata *u) {
828 pa_assert(u->pcm_handle);
830 if (u->alsa_rtpoll_item)
831 pa_rtpoll_item_free(u->alsa_rtpoll_item);
833 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
839 /* Called from IO context */
/* Suspend capture: pause the time smoother, close the PCM handle and drop
 * the poll item.  unsuspend() reopens everything later.
 * NOTE(review): elided extraction -- `pa_assert(u)`, the hw-params free
 * around line 846, the `return 0;` and the closing brace are missing from
 * this view. */
840 static int suspend(struct userdata *u) {
842 pa_assert(u->pcm_handle);
/* Freeze latency interpolation while the device is closed. */
844 pa_smoother_pause(u->smoother, pa_rtclock_now());
847 snd_pcm_close(u->pcm_handle);
848 u->pcm_handle = NULL;
850 if (u->alsa_rtpoll_item) {
851 pa_rtpoll_item_free(u->alsa_rtpoll_item);
852 u->alsa_rtpoll_item = NULL;
855 pa_log_info("Device suspended...");
860 /* Called from IO context */
/* Recompute hwbuf_unused from the currently requested latency, refresh the
 * tsched sleep/watermark values, derive avail_min and push the software
 * parameters to ALSA.
 * NOTE(review): elided extraction -- declarations (`pa_usec_t latency;
 * size_t b; int err;`), the initial `u->hwbuf_unused = 0;`, the
 * `b = u->frame_size;` clamp, the use_tsched guards, avail_min's initial
 * value (presumably 1), the `return` paths and closing braces are missing
 * from this view. */
861 static int update_sw_params(struct userdata *u) {
862 snd_pcm_uframes_t avail_min;
867 /* Use the full buffer if no one asked us for anything specific */
873 if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
876 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
878 b = pa_usec_to_bytes(latency, &u->source->sample_spec);
880 /* We need at least one sample in our buffer */
882 if (PA_UNLIKELY(b < u->frame_size))
/* Leave the part of the buffer beyond the requested latency unused. */
885 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
/* The watermark depends on hwbuf_unused, so re-derive it now. */
888 fix_min_sleep_wakeup(u);
889 fix_tsched_watermark(u);
892 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
897 pa_usec_t sleep_usec, process_usec;
899 hw_sleep_time(u, &sleep_usec, &process_usec);
900 avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
903 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
905 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
906 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
913 /* Called from IO Context on unsuspend or from main thread when creating source */
914 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
916 u->tsched_watermark = pa_convert_size(tsched_watermark, ss, &u->source->sample_spec);
918 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
919 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
921 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
922 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
924 fix_min_sleep_wakeup(u);
925 fix_tsched_watermark(u);
928 pa_source_set_latency_range_within_thread(u->source,
930 pa_bytes_to_usec(u->hwbuf_size, ss));
932 pa_source_set_latency_range(u->source,
934 pa_bytes_to_usec(u->hwbuf_size, ss));
936 /* work-around assert in pa_source_set_latency_within_thead,
937 keep track of min_latency and reuse it when
938 this routine is called from IO context */
939 u->min_latency_ref = u->source->thread_info.min_latency;
942 pa_log_info("Time scheduling watermark is %0.2fms",
943 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
946 /* Called from IO context */
947 static int unsuspend(struct userdata *u) {
951 snd_pcm_uframes_t period_size, buffer_size;
954 pa_assert(!u->pcm_handle);
956 pa_log_info("Trying resume...");
958 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
960 SND_PCM_NO_AUTO_RESAMPLE|
961 SND_PCM_NO_AUTO_CHANNELS|
962 SND_PCM_NO_AUTO_FORMAT)) < 0) {
963 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
967 ss = u->source->sample_spec;
968 period_size = u->fragment_size / u->frame_size;
969 buffer_size = u->hwbuf_size / u->frame_size;
973 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, true)) < 0) {
974 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
978 if (b != u->use_mmap || d != u->use_tsched) {
979 pa_log_warn("Resume failed, couldn't get original access mode.");
983 if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
984 pa_log_warn("Resume failed, couldn't restore original sample settings.");
988 if (period_size*u->frame_size != u->fragment_size ||
989 buffer_size*u->frame_size != u->hwbuf_size) {
990 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
991 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
992 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
996 if (update_sw_params(u) < 0)
999 if (build_pollfd(u) < 0)
1002 /* FIXME: We need to reload the volume somehow */
1005 pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
1006 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1007 u->last_smoother_update = 0;
1011 /* reset the watermark to the value defined when source was created */
1013 reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, true);
1015 pa_log_info("Resumed successfully...");
1020 if (u->pcm_handle) {
1021 snd_pcm_close(u->pcm_handle);
1022 u->pcm_handle = NULL;
1028 /* Called from IO context */
1029 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1030 struct userdata *u = PA_SOURCE(o)->userdata;
1034 case PA_SOURCE_MESSAGE_GET_LATENCY: {
1038 r = source_get_latency(u);
1040 *((int64_t*) data) = r;
1045 case PA_SOURCE_MESSAGE_SET_STATE:
1047 switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
1049 case PA_SOURCE_SUSPENDED: {
1052 pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1054 if ((r = suspend(u)) < 0)
1060 case PA_SOURCE_IDLE:
1061 case PA_SOURCE_RUNNING: {
1064 if (u->source->thread_info.state == PA_SOURCE_INIT) {
1065 if (build_pollfd(u) < 0)
1069 if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1070 if ((r = unsuspend(u)) < 0)
1077 case PA_SOURCE_UNLINKED:
1078 case PA_SOURCE_INIT:
1079 case PA_SOURCE_INVALID_STATE:
1086 return pa_source_process_msg(o, code, data, offset, chunk);
1089 /* Called from main context */
1090 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1091 pa_source_state_t old_state;
1094 pa_source_assert_ref(s);
1095 pa_assert_se(u = s->userdata);
1097 old_state = pa_source_get_state(u->source);
1099 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1101 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1102 if (reserve_init(u, u->device_name) < 0)
1103 return -PA_ERR_BUSY;
1108 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1109 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1112 pa_assert(u->mixer_handle);
1114 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1117 if (!PA_SOURCE_IS_LINKED(u->source->state))
1120 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1121 pa_source_set_mixer_dirty(u->source, true);
1125 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1126 pa_source_get_volume(u->source, true);
1127 pa_source_get_mute(u->source, true);
1133 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1134 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1137 pa_assert(u->mixer_handle);
1139 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1142 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1143 pa_source_set_mixer_dirty(u->source, true);
1147 if (mask & SND_CTL_EVENT_MASK_VALUE)
1148 pa_source_update_volume_and_mute(u->source);
1153 static void source_get_volume_cb(pa_source *s) {
1154 struct userdata *u = s->userdata;
1156 char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1159 pa_assert(u->mixer_path);
1160 pa_assert(u->mixer_handle);
1162 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1165 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1166 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1168 pa_log_debug("Read hardware volume: %s",
1169 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));
1171 if (pa_cvolume_equal(&u->hardware_volume, &r))
1174 s->real_volume = u->hardware_volume = r;
1176 /* Hmm, so the hardware volume changed, let's reset our software volume */
1177 if (u->mixer_path->has_dB)
1178 pa_source_set_soft_volume(s, NULL);
1181 static void source_set_volume_cb(pa_source *s) {
1182 struct userdata *u = s->userdata;
1184 char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1185 bool deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1188 pa_assert(u->mixer_path);
1189 pa_assert(u->mixer_handle);
1191 /* Shift up by the base volume */
1192 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1194 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1197 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1198 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1200 u->hardware_volume = r;
1202 if (u->mixer_path->has_dB) {
1203 pa_cvolume new_soft_volume;
1204 bool accurate_enough;
1206 /* Match exactly what the user requested by software */
1207 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1209 /* If the adjustment to do in software is only minimal we
1210 * can skip it. That saves us CPU at the expense of a bit of
1213 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1214 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1216 pa_log_debug("Requested volume: %s",
1217 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
1218 pa_log_debug("Got hardware volume: %s",
1219 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
1220 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1221 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
1222 pa_yes_no(accurate_enough));
1224 if (!accurate_enough)
1225 s->soft_volume = new_soft_volume;
1228 pa_log_debug("Wrote hardware volume: %s",
1229 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));
1231 /* We can't match exactly what the user requested, hence let's
1232 * at least tell the user about it */
1238 static void source_write_volume_cb(pa_source *s) {
1239 struct userdata *u = s->userdata;
1240 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1243 pa_assert(u->mixer_path);
1244 pa_assert(u->mixer_handle);
1245 pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1247 /* Shift up by the base volume */
1248 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1250 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
1251 pa_log_error("Writing HW volume failed");
1254 bool accurate_enough;
1256 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1257 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1259 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1261 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1262 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1264 if (!accurate_enough) {
1265 char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1267 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1268 pa_cvolume_snprint_verbose(volume_buf[0],
1269 sizeof(volume_buf[0]),
1270 &s->thread_info.current_hw_volume,
1273 pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
/* Source callback: read the hardware mute state through the mixer path
 * into *mute.
 * NOTE(review): elided extraction -- `pa_assert(s); pa_assert(u);`, the
 * error `return -1;`, the success `return 0;` and the closing brace are
 * missing from this view. */
1278 static int source_get_mute_cb(pa_source *s, bool *mute) {
1279 struct userdata *u = s->userdata;
1282 pa_assert(u->mixer_path);
1283 pa_assert(u->mixer_handle);
1285 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, mute) < 0)
/* Source callback: write the source's current mute flag to the hardware
 * via the mixer path.
 * NOTE(review): elided extraction -- `pa_assert(s); pa_assert(u);` and the
 * closing brace are missing from this view. */
1291 static void source_set_mute_cb(pa_source *s) {
1292 struct userdata *u = s->userdata;
1295 pa_assert(u->mixer_path);
1296 pa_assert(u->mixer_handle);
1298 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
/* Wire up (or clear) the source's volume and mute callbacks according to the
 * capabilities of the currently selected mixer path. Called whenever the
 * active path changes (e.g. on port switch) so the source falls back to
 * software volume/mute when the hardware path offers none. */
1301 static void mixer_volume_init(struct userdata *u) {
1304 if (!u->mixer_path->has_volume) {
      /* No hardware volume on this path: remove all HW volume callbacks. */
1305 pa_source_set_write_volume_callback(u->source, NULL);
1306 pa_source_set_get_volume_callback(u->source, NULL);
1307 pa_source_set_set_volume_callback(u->source, NULL);
1309 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1311 pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1312 pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
      /* Deferred (thread-side) volume writes need a dB-capable path. */
1314 if (u->mixer_path->has_dB && u->deferred_volume) {
1315 pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1316 pa_log_info("Successfully enabled deferred volume.");
1318 pa_source_set_write_volume_callback(u->source, NULL);
1320 if (u->mixer_path->has_dB) {
1321 pa_source_enable_decibel_volume(u->source, true);
1322 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
      /* Base volume maps PA_VOLUME_NORM onto the path's maximum dB gain. */
1324 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1325 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1327 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
      /* No dB information: fall back to raw integer volume steps. */
1329 pa_source_enable_decibel_volume(u->source, false);
1330 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1332 u->source->base_volume = PA_VOLUME_NORM;
1333 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1336 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
      /* Mute handling: hardware switch when available, software otherwise. */
1339 if (!u->mixer_path->has_mute) {
1340 pa_source_set_get_mute_callback(u->source, NULL);
1341 pa_source_set_set_mute_callback(u->source, NULL);
1342 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1344 pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1345 pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1346 pa_log_info("Using hardware mute control.");
/* pa_source set_port callback for UCM-managed devices: delegate port
 * activation entirely to the UCM context (false = capture direction here,
 * matching the other `false` is_sink arguments used in this file). */
1350 static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
1351 struct userdata *u = s->userdata;
1355 pa_assert(u->ucm_context);
1357 return pa_alsa_ucm_set_port(u->ucm_context, p, false);
/* pa_source set_port callback for mixer-path-managed devices: switch the
 * active mixer path to the one stored in the port's data, re-apply the
 * current mute state, and re-initialize the volume/mute callbacks for the
 * new path's capabilities. */
1360 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1361 struct userdata *u = s->userdata;
1362 pa_alsa_port_data *data;
1366 pa_assert(u->mixer_handle);
1368 data = PA_DEVICE_PORT_DATA(p);
     /* Every port must carry a mixer path; adopt it as the active one. */
1370 pa_assert_se(u->mixer_path = data->path);
1371 pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1373 mixer_volume_init(u);
     /* With deferred volume, the write callback (if set) must be invoked so
      * the hardware matches the software state — handling continues in lines
      * not shown here. */
1377 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1378 if (s->write_volume)
/* pa_source update_requested_latency callback: when clients change their
 * latency requests, recompute the ALSA software parameters (watermarks,
 * wakeup points). Only meaningful with timer-based scheduling. */
1388 static void source_update_requested_latency_cb(pa_source *s) {
1389 struct userdata *u = s->userdata;
1391 pa_assert(u->use_tsched); /* only when timer scheduling is used
1392 * we can dynamically adjust the
1398 update_sw_params(u);
/* pa_source reconfigure callback: accept a new sample spec if the device
 * supports the requested rate. Per the FIXME below, only the rate is
 * considered; format/channel changes are ignored. The rate is applied only
 * while the source is not opened (i.e. stream is not running). */
1401 static int source_reconfigure_cb(pa_source *s, pa_sample_spec *spec, bool passthrough) {
1402 struct userdata *u = s->userdata;
1404 bool supported = false;
1406 /* FIXME: we only update rate for now */
     /* u->rates is a zero-terminated list of rates probed from the device. */
1410 for (i = 0; u->rates[i]; i++) {
1411 if (u->rates[i] == spec->rate) {
1418 pa_log_info("Source does not support sample rate of %d Hz", spec->rate);
1422 if (!PA_SOURCE_IS_OPENED(s->state)) {
1423 pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, spec->rate);
1424 u->source->sample_spec.rate = spec->rate;
/* IO-thread main loop for the ALSA source. Responsibilities per iteration:
 *  - read captured audio from the PCM (mmap or unix read path) and post it
 *    to connected source outputs,
 *  - compute the next wakeup time (timer-based scheduling, smoother-adjusted),
 *  - apply deferred hardware volume changes at the right moment,
 *  - sleep in pa_rtpoll_run() and afterwards dispatch ALSA poll revents,
 *    recovering from errors (e.g. overruns) where possible.
 * Runs until rtpoll signals shutdown; on abnormal exit it asks the main
 * thread to unload the module. */
1431 static void thread_func(void *userdata) {
1432 struct userdata *u = userdata;
1433 unsigned short revents = 0;
1437 pa_log_debug("Thread starting up");
1439 if (u->core->realtime_scheduling)
1440 pa_make_realtime(u->core->realtime_priority);
1442 pa_thread_mq_install(&u->thread_mq);
1446 pa_usec_t rtpoll_sleep = 0, real_sleep;
1449 pa_log_debug("Loop");
1452 /* Read some data and pass it to the sources */
1453 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1455 pa_usec_t sleep_usec = 0;
1456 bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
      /* Capture is started lazily here; resume the smoother so timing
       * conversions start from "now". */
1459 pa_log_info("Starting capture.");
1460 snd_pcm_start(u->pcm_handle);
1462 pa_smoother_resume(u->smoother, pa_rtclock_now(), true);
1468 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1470 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1475 /* pa_log_debug("work_done = %i", work_done); */
1480 if (u->use_tsched) {
1483 /* OK, the capture buffer is now empty, let's
1484 * calculate when to wake up next */
1486 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1488 /* Convert from the sound card time domain to the
1489 * system time domain */
1490 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1492 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1494 /* We don't trust the conversion, so we wake up whatever comes first */
1495 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
      /* Deferred volume changes may need a wakeup earlier than the capture
       * timer; take the minimum of the two deadlines. */
1499 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1500 pa_usec_t volume_sleep;
1501 pa_source_volume_change_apply(u->source, &volume_sleep);
1502 if (volume_sleep > 0) {
1503 if (rtpoll_sleep > 0)
1504 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1506 rtpoll_sleep = volume_sleep;
1510 if (rtpoll_sleep > 0) {
1511 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
      /* Remember when we went to sleep so we can measure scheduling delay. */
1512 real_sleep = pa_rtclock_now();
1515 pa_rtpoll_set_timer_disabled(u->rtpoll);
1517 /* Hmm, nothing to do. Let's sleep */
1518 if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
1521 if (rtpoll_sleep > 0) {
1522 real_sleep = pa_rtclock_now() - real_sleep;
1524 pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
1525 (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
1526 (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
      /* Warn if the kernel woke us up much later than requested — that
       * directly worsens capture latency under timer-based scheduling. */
1528 if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
1529 pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
1530 (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
1531 (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
1534 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
1535 pa_source_volume_change_apply(u->source, NULL);
1540 /* Tell ALSA about this and process its response */
1541 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1542 struct pollfd *pollfd;
1546 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1548 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1549 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
      /* Anything besides POLLIN (POLLERR/POLLHUP/...) signals trouble;
       * attempt ALSA recovery before giving up. */
1553 if (revents & ~POLLIN) {
1554 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1559 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1560 pa_log_debug("Wakeup from ALSA!");
1567 /* If this was no regular exit from the loop we have to continue
1568 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1569 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1570 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1573 pa_log_debug("Thread shutting down");
/* Choose the source's name, in priority order:
 *  1. explicit "source_name" module argument (name registration must not
 *     fall back to an alternative name → namereg_fail = true),
 *  2. legacy "name" module argument,
 *  3. a name synthesized from the device id/name (and mapping, if any),
 *     of the form "alsa_input.<dev>[.<mapping>]", where collisions may be
 *     resolved automatically (namereg_fail = false). */
1576 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1582 pa_assert(device_name);
1584 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1585 pa_source_new_data_set_name(data, n);
1586 data->namereg_fail = true;
1590 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1591 data->namereg_fail = true;
1593 n = device_id ? device_id : device_name;
1594 data->namereg_fail = false;
1598 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1600 t = pa_sprintf_malloc("alsa_input.%s", n);
1602 pa_source_new_data_set_name(data, t);
/* Locate and probe an ALSA mixer for the opened PCM. If an explicit mixer
 * `element` was given, synthesize a single capture path for it; otherwise
 * use the mapping's probed input path set. On any failure, tear down the
 * partially initialized mixer state (error label visible at the bottom). */
1606 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
     /* Nothing to do without a mapping or an explicit element. */
1607 if (!mapping && !element)
1610 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1611 pa_log_info("Failed to find a working mixer device.");
1617 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1620 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1623 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1624 pa_alsa_path_dump(u->mixer_path);
1625 } else if (!(u->mixer_path_set = mapping->input_path_set))
     /* Cleanup on failure: free any synthesized path and close the mixer. */
1632 if (u->mixer_path) {
1633 pa_alsa_path_free(u->mixer_path);
1634 u->mixer_path = NULL;
1637 if (u->mixer_handle) {
1638 snd_mixer_close(u->mixer_handle);
1639 u->mixer_handle = NULL;
/* Activate the mixer path matching the source's active port (or the first
 * available path), hook up volume/mute callbacks, and register mixer-event
 * callbacks so external mixer changes are picked up: via rtpoll in the IO
 * thread for deferred volume, via an fdlist on the main loop otherwise.
 * Returns 0 on success (no mixer is not an error), < 0 on setup failure. */
1643 static int setup_mixer(struct userdata *u, bool ignore_dB) {
1644 bool need_mixer_callback = false;
     /* No mixer found earlier — nothing to set up. */
1648 if (!u->mixer_handle)
1651 if (u->source->active_port) {
1652 pa_alsa_port_data *data;
1654 /* We have a list of supported paths, so let's activate the
1655 * one that has been chosen as active */
1657 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1658 u->mixer_path = data->path;
1660 pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);
     /* No active port: fall back to the first path of the set, if any. */
1664 if (!u->mixer_path && u->mixer_path_set)
1665 u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1667 if (u->mixer_path) {
1668 /* Hmm, we have only a single path, then let's activate it */
1670 pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
1675 mixer_volume_init(u);
1677 /* Will we need to register callbacks? */
1678 if (u->mixer_path_set && u->mixer_path_set->paths) {
1682 PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1683 if (p->has_volume || p->has_mute)
1684 need_mixer_callback = true;
1687 else if (u->mixer_path)
1688 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1690 if (need_mixer_callback) {
1691 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1692 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
      /* Deferred volume: poll the mixer from the IO thread's rtpoll. */
1693 u->mixer_pd = pa_alsa_mixer_pdata_new();
1694 mixer_callback = io_mixer_callback;
1696 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1697 pa_log("Failed to initialize file descriptor monitoring");
      /* Otherwise: watch the mixer fds from the main loop. */
1701 u->mixer_fdl = pa_alsa_fdlist_new();
1702 mixer_callback = ctl_mixer_callback;
1704 if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1705 pa_log("Failed to initialize file descriptor monitoring");
1710 if (u->mixer_path_set)
1711 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1713 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
/* Create and start an ALSA capture source.
 *
 * Overall sequence:
 *  1. derive the sample spec / channel map from the mapping and modargs,
 *  2. parse buffering, scheduling (tsched/mmap), dB and volume modargs,
 *  3. allocate the userdata, rtpoll, thread message queue and smoother,
 *  4. reserve the device and open the PCM (by mapping, by device_id probe,
 *     or by plain device string), falling back from mmap/tsched as needed,
 *  5. create the pa_source, wire up callbacks and ports (UCM or mixer path),
 *  6. configure buffer/watermark metrics and software parameters,
 *  7. spawn the IO thread, sync the initial volume/mute with the hardware,
 *     and publish the source with pa_source_put().
 *
 * Returns the new source, or NULL on failure (cleanup paths mostly not
 * visible in this excerpt). `driver`, `card` and `mapping` may describe a
 * card-profile context; without a mapping the "device"/"device_id" modargs
 * select the PCM. */
1719 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1721 struct userdata *u = NULL;
1722 const char *dev_id = NULL, *key, *mod_name;
1724 char *thread_name = NULL;
1725 uint32_t alternate_sample_rate;
1727 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1728 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1730 bool use_mmap = true, b, use_tsched = true, d, ignore_dB = false, namereg_fail = false, deferred_volume = false, fixed_latency_range = false;
1731 pa_source_new_data data;
1734 pa_alsa_profile_set *profile_set = NULL;
1740 ss = m->core->default_sample_spec;
1741 map = m->core->default_channel_map;
1743 /* Pick sample spec overrides from the mapping, if any */
1745 if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
1746 ss.format = mapping->sample_spec.format;
1747 if (mapping->sample_spec.rate != 0)
1748 ss.rate = mapping->sample_spec.rate;
1749 if (mapping->sample_spec.channels != 0) {
1750 ss.channels = mapping->sample_spec.channels;
1751 if (pa_channel_map_valid(&mapping->channel_map))
1752 pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
1756 /* Override with modargs if provided */
1757 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1758 pa_log("Failed to parse sample specification and channel map");
1762 alternate_sample_rate = m->core->alternate_sample_rate;
1763 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1764 pa_log("Failed to parse alternate sample rate");
1768 frame_size = pa_frame_size(&ss);
     /* Compute default buffering metrics (classic IRQ-driven fragments and
      * timer-scheduling buffer/watermark), then let modargs override them. */
1770 nfrags = m->core->default_n_fragments;
1771 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1773 frag_size = (uint32_t) frame_size;
1774 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1775 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1777 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1778 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1779 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1780 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1781 pa_log("Failed to parse buffer metrics");
1785 buffer_size = nfrags * frag_size;
1787 period_frames = frag_size/frame_size;
1788 buffer_frames = buffer_size/frame_size;
1789 tsched_frames = tsched_size/frame_size;
1791 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1792 pa_log("Failed to parse mmap argument.");
1796 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1797 pa_log("Failed to parse tsched argument.");
1801 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1802 pa_log("Failed to parse ignore_dB argument.");
1806 deferred_volume = m->core->deferred_volume;
1807 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1808 pa_log("Failed to parse deferred_volume argument.");
1812 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
1813 pa_log("Failed to parse fixed_latency_range argument.");
     /* Timer scheduling may be globally unavailable (e.g. no high-res timers). */
1817 use_tsched = pa_alsa_may_tsched(use_tsched);
1819 u = pa_xnew0(struct userdata, 1);
1822 u->use_mmap = use_mmap;
1823 u->use_tsched = use_tsched;
1824 u->deferred_volume = deferred_volume;
1825 u->fixed_latency_range = fixed_latency_range;
1827 u->rtpoll = pa_rtpoll_new();
1829 if (pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll) < 0) {
1830 pa_log("pa_thread_mq_init() failed.");
     /* Smoother: converts between sound-card and system time domains. */
1834 u->smoother = pa_smoother_new(
1835 SMOOTHER_ADJUST_USEC,
1836 SMOOTHER_WINDOW_USEC,
1842 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1845 if (mapping && mapping->ucm_context.ucm)
1846 u->ucm_context = &mapping->ucm_context;
1848 dev_id = pa_modargs_get_value(
1850 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1852 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
     /* Acquire the device reservation (and monitor it) before opening. */
1854 if (reserve_init(u, dev_id) < 0)
1857 if (reserve_monitor_init(u, dev_id) < 0)
1863 /* Force ALSA to reread its configuration if module-alsa-card didn't
1864 * do it for us. This matters if our device was hot-plugged after ALSA
1865 * has already read its configuration - see
1866 * https://bugs.freedesktop.org/show_bug.cgi?id=54029
1870 snd_config_update_free_global();
     /* Open the PCM: by mapping (requires device_id), by device_id probing
      * a profile set, or by a raw device string as last resort. */
1874 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1875 pa_log("device_id= not set");
1879 if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
1880 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
1881 pa_log("Failed to enable ucm modifier %s", mod_name);
1883 pa_log_debug("Enabled ucm modifier %s", mod_name);
1886 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1890 SND_PCM_STREAM_CAPTURE,
1891 &period_frames, &buffer_frames, tsched_frames,
1895 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1897 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1900 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1904 SND_PCM_STREAM_CAPTURE,
1905 &period_frames, &buffer_frames, tsched_frames,
1906 &b, &d, profile_set, &mapping)))
1911 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1912 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1915 SND_PCM_STREAM_CAPTURE,
1916 &period_frames, &buffer_frames, tsched_frames,
1921 pa_assert(u->device_name);
1922 pa_log_info("Successfully opened device %s.", u->device_name);
1924 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1925 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1930 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
     /* `b`/`d` report whether the opened PCM actually supports mmap and
      * timer scheduling; downgrade our mode flags accordingly. */
1932 if (use_mmap && !b) {
1933 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1934 u->use_mmap = use_mmap = false;
1937 if (use_tsched && (!b || !d)) {
1938 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1939 u->use_tsched = use_tsched = false;
1943 pa_log_info("Successfully enabled mmap() mode.");
1945 if (u->use_tsched) {
1946 pa_log_info("Successfully enabled timer-based scheduling mode.");
1947 if (u->fixed_latency_range)
1948 pa_log_info("Disabling latency range changes on overrun");
1951 u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
1953 pa_log_error("Failed to find any supported sample rates.");
1957 /* ALSA might tweak the sample spec, so recalculate the frame size */
1958 frame_size = pa_frame_size(&ss);
1960 if (!u->ucm_context)
1961 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
     /* Build the pa_source_new() descriptor: name, specs, proplist, ports. */
1963 pa_source_new_data_init(&data);
1964 data.driver = driver;
1967 set_source_name(&data, ma, dev_id, u->device_name, mapping);
1969 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1970 * variable instead of using &data.namereg_fail directly, because
1971 * data.namereg_fail is a bitfield and taking the address of a bitfield
1972 * variable is impossible. */
1973 namereg_fail = data.namereg_fail;
1974 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1975 pa_log("Failed to parse namereg_fail argument.");
1976 pa_source_new_data_done(&data);
1979 data.namereg_fail = namereg_fail;
1981 pa_source_new_data_set_sample_spec(&data, &ss);
1982 pa_source_new_data_set_channel_map(&data, &map);
1983 pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
1985 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1986 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1987 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1988 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1989 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1992 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1993 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1995 while ((key = pa_proplist_iterate(mapping->proplist, &state)))
1996 pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
1999 pa_alsa_init_description(data.proplist, card);
2001 if (u->control_device)
2002 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2004 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2005 pa_log("Invalid properties");
2006 pa_source_new_data_done(&data);
2011 pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, false, card);
2012 else if (u->mixer_path_set)
2013 pa_alsa_add_ports(&data, u->mixer_path_set, card);
2015 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
     /* Remember whether the user explicitly set volume/mute before data is
      * freed; used below to decide push-to-hw vs read-from-hw. */
2016 volume_is_set = data.volume_is_set;
2017 mute_is_set = data.muted_is_set;
2018 pa_source_new_data_done(&data);
2021 pa_log("Failed to create source object");
2025 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2026 &u->source->thread_info.volume_change_safety_margin) < 0) {
2027 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2031 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2032 &u->source->thread_info.volume_change_extra_delay) < 0) {
2033 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2037 u->source->parent.process_msg = source_process_msg;
2039 u->source->update_requested_latency = source_update_requested_latency_cb;
2040 u->source->set_state = source_set_state_cb;
2042 u->source->set_port = source_set_port_ucm_cb;
2044 u->source->set_port = source_set_port_cb;
2045 if (u->source->alternate_sample_rate)
2046 u->source->reconfigure = source_reconfigure_cb;
2047 u->source->userdata = u;
2049 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
2050 pa_source_set_rtpoll(u->source, u->rtpoll);
2052 u->frame_size = frame_size;
2053 u->frames_per_block = pa_mempool_block_size_max(m->core->mempool) / frame_size;
2054 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2055 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2056 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
2058 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2059 (double) u->hwbuf_size / (double) u->fragment_size,
2060 (long unsigned) u->fragment_size,
2061 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2062 (long unsigned) u->hwbuf_size,
2063 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2065 if (u->use_tsched) {
2066 u->tsched_watermark_ref = tsched_watermark;
2067 reset_watermark(u, u->tsched_watermark_ref, &ss, false);
     /* Without tsched the latency is fixed to the hardware buffer length. */
2070 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
2074 if (update_sw_params(u) < 0)
2077 if (u->ucm_context) {
2078 if (u->source->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, false) < 0)
2080 } else if (setup_mixer(u, ignore_dB) < 0)
2083 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2085 thread_name = pa_sprintf_malloc("alsa-source-%s", pa_strnull(pa_proplist_gets(u->source->proplist, "alsa.id")));
2086 if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
2087 pa_log("Failed to create thread.");
2090 pa_xfree(thread_name);
2093 /* Get initial mixer settings */
2094 if (volume_is_set) {
2095 if (u->source->set_volume)
2096 u->source->set_volume(u->source);
2098 if (u->source->get_volume)
2099 u->source->get_volume(u->source);
2103 if (u->source->set_mute)
2104 u->source->set_mute(u->source);
2106 if (u->source->get_mute) {
2109 if (u->source->get_mute(u->source, &mute) >= 0)
2110 pa_source_set_mute(u->source, mute, false);
2114 if ((volume_is_set || mute_is_set) && u->source->write_volume)
2115 u->source->write_volume(u->source);
2117 pa_source_put(u->source);
2120 pa_alsa_profile_set_free(profile_set);
2125 pa_xfree(thread_name);
2131 pa_alsa_profile_set_free(profile_set);
/* Tear down a source's userdata in reverse construction order: unlink the
 * source, shut down and join the IO thread, then release mixer, rtpoll,
 * PCM, smoother and string resources. Each step is guarded so this is safe
 * to call on a partially constructed userdata from the error paths of
 * pa_alsa_source_new(). */
2136 static void userdata_free(struct userdata *u) {
2140 pa_source_unlink(u->source);
     /* Synchronously ask the IO thread to shut down, then join it. */
2143 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2144 pa_thread_free(u->thread);
2147 pa_thread_mq_done(&u->thread_mq);
2150 pa_source_unref(u->source);
2153 pa_alsa_mixer_pdata_free(u->mixer_pd);
2155 if (u->alsa_rtpoll_item)
2156 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2159 pa_rtpoll_free(u->rtpoll);
     /* Stop capture before closing the PCM handle. */
2161 if (u->pcm_handle) {
2162 snd_pcm_drop(u->pcm_handle);
2163 snd_pcm_close(u->pcm_handle);
2167 pa_alsa_fdlist_free(u->mixer_fdl);
     /* Paths owned by a path set are freed with the set, not here. */
2169 if (u->mixer_path && !u->mixer_path_set)
2170 pa_alsa_path_free(u->mixer_path);
2172 if (u->mixer_handle)
2173 snd_mixer_close(u->mixer_handle);
2176 pa_smoother_free(u->smoother);
2184 pa_xfree(u->device_name);
2185 pa_xfree(u->control_device);
2186 pa_xfree(u->paths_dir);
2190 void pa_alsa_source_free(pa_source *s) {
2193 pa_source_assert_ref(s);
2194 pa_assert_se(u = s->userdata);