2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
28 #include <alsa/asoundlib.h>
30 #include <pulse/rtclock.h>
31 #include <pulse/timeval.h>
32 #include <pulse/util.h>
33 #include <pulse/volume.h>
34 #include <pulse/xmalloc.h>
36 #include <pulsecore/core.h>
37 #include <pulsecore/i18n.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-rtclock.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/thread.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/time-smoother.h>
52 #include <modules/reserve-wrap.h>
54 #include "alsa-util.h"
55 #include "alsa-source.h"
57 /* #define DEBUG_TIMING */
59 #define DEFAULT_DEVICE "default"
61 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
62 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
64 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
65 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
66 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
67 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
68 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
69 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
71 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
72 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
74 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
75 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
77 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
78 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
80 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
88 pa_thread_mq thread_mq;
91 snd_pcm_t *pcm_handle;
94 pa_alsa_fdlist *mixer_fdl;
95 pa_alsa_mixer_pdata *mixer_pd;
97 snd_mixer_t *mixer_handle;
98 pa_alsa_path_set *mixer_path_set;
99 pa_alsa_path *mixer_path;
101 pa_cvolume hardware_volume;
103 pa_sample_spec verified_sample_spec;
104 pa_sample_format_t *supported_formats;
105 unsigned int *supported_rates;
107 size_t fragment_size;
110 size_t tsched_watermark;
119 tsched_watermark_ref,
125 watermark_inc_threshold,
126 watermark_dec_threshold;
128 snd_pcm_uframes_t frames_per_block;
130 pa_usec_t watermark_dec_not_before;
131 pa_usec_t min_latency_ref;
132 pa_usec_t tsched_watermark_usec;
134 char *device_name; /* name of the PCM device */
135 char *control_device; /* name of the control device */
137 bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
141 pa_rtpoll_item *alsa_rtpoll_item;
143 pa_smoother *smoother;
145 pa_usec_t smoother_interval;
146 pa_usec_t last_smoother_update;
148 pa_reserve_wrapper *reserve;
149 pa_hook_slot *reserve_slot;
150 pa_reserve_monitor_wrapper *monitor;
151 pa_hook_slot *monitor_slot;
154 pa_alsa_ucm_mapping_context *ucm_context;
158 SOURCE_MESSAGE_SYNC_MIXER = PA_SOURCE_MESSAGE_MAX
161 static void userdata_free(struct userdata *u);
162 static int unsuspend(struct userdata *u, bool recovering);
/* Device-reservation hook: another application asked us to release the ALSA
 * device. Suspend the source; if suspending fails, cancel the hook so the
 * reservation is not handed over. (Span is elided; lines between the numbered
 * lines shown are missing.) */
164 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
168     pa_log_debug("Suspending source %s, because another application requested us to release the device.", u->source->name);
170     if (pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION) < 0)
171         return PA_HOOK_CANCEL;
/* Tear down the device-reservation state: disconnect the hook slot and drop
 * our reference on the reserve wrapper. */
176 static void reserve_done(struct userdata *u) {
179     if (u->reserve_slot) {
180         pa_hook_slot_free(u->reserve_slot);
181         u->reserve_slot = NULL;
185         pa_reserve_wrapper_unref(u->reserve);
/* Push the source's human-readable description into the reservation object so
 * other clients can see who holds the device. No-op until both the source and
 * the reserve wrapper exist. */
190 static void reserve_update(struct userdata *u) {
191     const char *description;
194     if (!u->source || !u->reserve)
197     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
198         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
/* Acquire the session-wide device reservation for ALSA device 'dname' and
 * connect reserve_cb() to its hook. Skipped entirely in system mode
 * (reservations are a per-user-session concept). */
201 static int reserve_init(struct userdata *u, const char *dname) {
210     if (pa_in_system_mode())
213     if (!(rname = pa_alsa_get_reserve_name(dname)))
216     /* We are resuming, try to lock the device */
217     u->reserve = pa_reserve_wrapper_get(u->core, rname);
225     pa_assert(!u->reserve_slot);
226     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
/* Reservation-monitor hook: 'busy' is non-zero while some other application
 * holds the device. Suspend while busy (unless we hold the reservation
 * ourselves), resume when it becomes free again. */
231 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
235     if (PA_PTR_TO_UINT(busy) && !u->reserve) {
236         pa_log_debug("Suspending source %s, because another application is blocking the access to the device.", u->source->name);
237         pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION);
239         pa_log_debug("Resuming source %s, because other applications aren't blocking access to the device any more.", u->source->name);
240         pa_source_suspend(u->source, false, PA_SUSPEND_APPLICATION);
/* Tear down the reservation monitor: free the hook slot and drop the monitor
 * wrapper reference. Mirror image of monitor setup in reserve_monitor_init(). */
246 static void monitor_done(struct userdata *u) {
249     if (u->monitor_slot) {
250         pa_hook_slot_free(u->monitor_slot);
251         u->monitor_slot = NULL;
255         pa_reserve_monitor_wrapper_unref(u->monitor);
/* Start watching the reservation of device 'dname' so we notice when another
 * application grabs or releases it; monitor_cb() reacts to changes. Skipped in
 * system mode, like reserve_init(). */
260 static int reserve_monitor_init(struct userdata *u, const char *dname) {
266     if (pa_in_system_mode())
269     if (!(rname = pa_alsa_get_reserve_name(dname)))
272     /* We are resuming, try to lock the device */
273     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
279     pa_assert(!u->monitor_slot);
280     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
/* Recompute the minimum sleep and wakeup margins (in bytes) for timer-based
 * scheduling. Both are clamped between one frame and half of the usable part
 * of the hardware buffer, so the constants never exceed what the buffer can
 * actually hold. Only meaningful with tsched enabled. */
285 static void fix_min_sleep_wakeup(struct userdata *u) {
286     size_t max_use, max_use_2;
289     pa_assert(u->use_tsched);
291     max_use = u->hwbuf_size - u->hwbuf_unused;
292     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
294     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
295     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
297     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
298     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
/* Clamp the tsched watermark into [min_wakeup, max_use - min_sleep] and keep
 * the cached microsecond mirror (tsched_watermark_usec) in sync with the byte
 * value. Called after anything that may move the watermark. */
301 static void fix_tsched_watermark(struct userdata *u) {
304     pa_assert(u->use_tsched);
306     max_use = u->hwbuf_size - u->hwbuf_unused;
308     if (u->tsched_watermark > max_use - u->min_sleep)
309         u->tsched_watermark = max_use - u->min_sleep;
311     if (u->tsched_watermark < u->min_wakeup)
312         u->tsched_watermark = u->min_wakeup;
314     u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
/* React to an overrun/deadline miss by making wakeups earlier. Strategy:
 * first try to grow the watermark (doubling, capped by the fixed increment
 * step); if it is already pinned at its maximum, grow the source's minimum
 * latency instead — unless a fixed latency range was requested. */
317 static void increase_watermark(struct userdata *u) {
318     size_t old_watermark;
319     pa_usec_t old_min_latency, new_min_latency;
322     pa_assert(u->use_tsched);
324     /* First, just try to increase the watermark */
325     old_watermark = u->tsched_watermark;
326     u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
327     fix_tsched_watermark(u);
329     if (old_watermark != u->tsched_watermark) {
330         pa_log_info("Increasing wakeup watermark to %0.2f ms",
331                     (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
335     /* Hmm, we cannot increase the watermark any further, hence let's
336        raise the latency unless doing so was disabled in
338     if (u->fixed_latency_range)
341     old_min_latency = u->source->thread_info.min_latency;
342     new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
343     new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
345     if (old_min_latency != new_min_latency) {
346         pa_log_info("Increasing minimal latency to %0.2f ms",
347                     (double) new_min_latency / PA_USEC_PER_MSEC);
349         pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
352     /* When we reach this we're officially fucked! */
/* Opposite of increase_watermark(): when we have had plenty of headroom for a
 * while, shrink the watermark (halving, bounded by the decrement step) so we
 * wake up less often. watermark_dec_not_before rate-limits shrinking so we do
 * not oscillate right after an increase. */
355 static void decrease_watermark(struct userdata *u) {
356     size_t old_watermark;
360     pa_assert(u->use_tsched);
362     now = pa_rtclock_now();
364     if (u->watermark_dec_not_before <= 0)
367     if (u->watermark_dec_not_before > now)
370     old_watermark = u->tsched_watermark;
372     if (u->tsched_watermark < u->watermark_dec_step)
373         u->tsched_watermark = u->tsched_watermark / 2;
375         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
377     fix_tsched_watermark(u);
379     if (old_watermark != u->tsched_watermark)
380         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
381                     (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
383     /* We don't change the latency range*/
/* Do not allow another decrease until the current setting has proven itself
 * for TSCHED_WATERMARK_VERIFY_AFTER_USEC. */
386     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
389 /* Called from IO Context on unsuspend or from main thread when creating source */
/* Re-derive every tsched-related byte quantity (watermark, inc/dec steps and
 * thresholds) from the given sample spec, then re-announce the source's
 * latency range. The pa_source_set_latency_range_within_thread() vs
 * pa_source_set_latency_range() split presumably depends on which context we
 * were called from — the selecting condition is in the elided lines. */
390 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
392     u->tsched_watermark = pa_convert_size(tsched_watermark, ss, &u->source->sample_spec);
394     u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
395     u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
397     u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
398     u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
400     fix_min_sleep_wakeup(u);
401     fix_tsched_watermark(u);
404         pa_source_set_latency_range_within_thread(u->source,
406                                                   pa_bytes_to_usec(u->hwbuf_size, ss));
408         pa_source_set_latency_range(u->source,
410                                     pa_bytes_to_usec(u->hwbuf_size, ss));
412     /* work-around assert in pa_source_set_latency_within_thead,
413        keep track of min_latency and reuse it when
414        this routine is called from IO context */
415     u->min_latency_ref = u->source->thread_info.min_latency;
418     pa_log_info("Time scheduling watermark is %0.2fms",
419                 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
/* Compute how long the IO thread may sleep (*sleep_usec) and how much
 * processing margin to reserve (*process_usec) for timer-based scheduling.
 * Starts from the requested latency (falling back to the full hardware buffer
 * time) and subtracts the watermark. Only used with tsched. */
422 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
425     pa_assert(sleep_usec);
426     pa_assert(process_usec);
429     pa_assert(u->use_tsched);
431     usec = pa_source_get_requested_latency_within_thread(u->source);
433     if (usec == (pa_usec_t) -1)
434         usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
436     wm = u->tsched_watermark_usec;
441     *sleep_usec = usec - wm;
445     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
446                  (unsigned long) (usec / PA_USEC_PER_MSEC),
447                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
448                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
452 /* Reset smoother and counters */
/* Restart timing state from scratch: reset the rate smoother to "now" and
 * shrink the smoother update interval back to its minimum so it re-converges
 * quickly after a resume. */
453 static void reset_vars(struct userdata *u) {
455     pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
456     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
457     u->last_smoother_update = 0;
463 /* Called from IO context */
/* Pause the smoother (so latency reports stay sane while closed), close the
 * PCM handle and remove its fds from the rtpoll loop. */
464 static void close_pcm(struct userdata *u) {
465     pa_smoother_pause(u->smoother, pa_rtclock_now());
468     snd_pcm_close(u->pcm_handle);
469     u->pcm_handle = NULL;
471     if (u->alsa_rtpoll_item) {
472         pa_rtpoll_item_free(u->alsa_rtpoll_item);
473         u->alsa_rtpoll_item = NULL;
/* Attempt to recover the PCM after an error from an ALSA call ('call' names
 * it for logging, 'err' is the negative errno). Uses snd_pcm_recover() for
 * the standard -EPIPE/-ESTRPIPE cases; if that fails, falls back to a full
 * restart via unsuspend(u, true) as a last resort. */
477 static int try_recover(struct userdata *u, const char *call, int err) {
482     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
/* -EAGAIN must be handled by the callers (retry), never forwarded here. */
484     pa_assert(err != -EAGAIN);
487         pa_log_debug("%s: Buffer overrun!", call);
489     if (err == -ESTRPIPE)
490         pa_log_debug("%s: System suspended!", call);
492     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
493         pa_log("%s: %s, trying to restart PCM", call, pa_alsa_strerror(err));
495         /* As a last measure, restart the PCM and inform the caller about it. */
497         if (unsuspend(u, true) < 0)
/* Given n_bytes currently available to read, compute how much room is left in
 * the hardware buffer before it overruns, log overruns, and adapt the tsched
 * watermark: grow it on overrun or when headroom drops below the increase
 * threshold; shrink it (only on genuine timeouts) when headroom exceeds the
 * decrease threshold. Returns the remaining headroom in bytes. */
507 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, bool on_timeout) {
508     size_t left_to_record;
509     size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
510     bool overrun = false;
512     /* We use <= instead of < for this check here because an overrun
513      * only happens after the last sample was processed, not already when
514      * it is removed from the buffer. This is particularly important
515      * when block transfer is used. */
517     if (n_bytes <= rec_space)
518         left_to_record = rec_space - n_bytes;
521         /* We got a dropout. What a mess! */
529         if (pa_log_ratelimit(PA_LOG_INFO))
530             pa_log_info("Overrun!");
534     pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
538         bool reset_not_before = true;
540         if (overrun || left_to_record < u->watermark_inc_threshold)
541             increase_watermark(u);
542         else if (left_to_record > u->watermark_dec_threshold) {
543             reset_not_before = false;
545             /* We decrease the watermark only if have actually
546              * been woken up by a timeout. If something else woke
547              * us up it's too easy to fulfill the deadlines... */
550                 decrease_watermark(u);
553         if (reset_not_before)
554             u->watermark_dec_not_before = 0;
557     return left_to_record;
/* Capture loop body for mmap access mode. Repeatedly: query how much data is
 * available (pa_alsa_safe_avail), decide whether reading is worthwhile yet,
 * then map the hardware buffer (pa_alsa_safe_mmap_begin), wrap it in a fixed
 * memblock, post it downstream with pa_source_post() and commit the frames
 * back with snd_pcm_mmap_commit(). On ALSA errors it runs try_recover().
 * On exit, *sleep_usec is set from the remaining headroom minus the watermark.
 * Returns 1 if any data was moved, 0 otherwise — error paths are in the
 * elided lines. 'polled' guards the spurious-wakeup diagnostics; 'on_timeout'
 * is forwarded to the watermark logic. */
560 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
561     bool work_done = false;
562     bool recovery_done = false;
563     pa_usec_t max_sleep_usec = 0, process_usec = 0;
564     size_t left_to_record;
568     pa_source_assert_ref(u->source);
571         hw_sleep_time(u, &max_sleep_usec, &process_usec);
577         bool after_avail = true;
579         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
581             recovery_done = true;
582             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) >= 0)
588         n_bytes = (size_t) n * u->frame_size;
591         pa_log_debug("avail: %lu", (unsigned long) n_bytes);
594         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
/* Skip the read if we'd wake again soon anyway and enough headroom remains. */
599             pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
601                 pa_log_debug("Not reading, because too early.");
606         if (PA_UNLIKELY(n_bytes <= 0)) {
/* POLLIN woke us but nothing is available: almost certainly a driver bug. */
610                 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
611                 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read.\n"
612                          "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
613                          "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
619                 pa_log_debug("Not reading, because not necessary.");
626             pa_log_debug("Not filling up, because already too many iterations.");
635         pa_log_debug("Reading");
642             const snd_pcm_channel_area_t *areas;
643             snd_pcm_uframes_t offset, frames;
644             snd_pcm_sframes_t sframes;
646             frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
647             /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
649             if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
651                 if (!after_avail && err == -EAGAIN)
654                 recovery_done = true;
655                 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
664             /* Make sure that if these memblocks need to be copied they will fit into one slot */
665             frames = PA_MIN(frames, u->frames_per_block);
667             if (!after_avail && frames == 0)
670             pa_assert(frames > 0);
673             /* Check these are multiples of 8 bit */
674             pa_assert((areas[0].first & 7) == 0);
675             pa_assert((areas[0].step & 7) == 0);
677             /* We assume a single interleaved memory buffer */
678             pa_assert((areas[0].first >> 3) == 0);
679             pa_assert((areas[0].step >> 3) == u->frame_size);
681             p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
/* Zero-copy: wrap the mapped hardware region in a fixed memblock. It must be
 * unreffed with pa_memblock_unref_fixed() before the frames are committed. */
683             chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, true);
684             chunk.length = pa_memblock_get_length(chunk.memblock);
687             pa_source_post(u->source, &chunk);
688             pa_memblock_unref_fixed(chunk.memblock);
690             if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
692                 recovery_done = true;
693                 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
704             u->read_count += frames * u->frame_size;
707             pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
710             if ((size_t) frames * u->frame_size >= n_bytes)
713             n_bytes -= (size_t) frames * u->frame_size;
718     *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
719     process_usec = u->tsched_watermark_usec;
721     if (*sleep_usec > process_usec)
722         *sleep_usec -= process_usec;
726     /* If the PCM was recovered, it may need restarting. Reduce the sleep time
727      * to 0 to ensure immediate restart. */
732     return work_done ? 1 : 0;
/* Capture loop body for plain read (non-mmap) access mode. Same overall shape
 * as mmap_read(), but allocates a fresh memblock and fills it with
 * snd_pcm_readi() instead of mapping the hardware buffer. Error paths go
 * through try_recover(); *sleep_usec is derived the same way on exit.
 * Returns 1 if any data was posted downstream, 0 otherwise (error returns are
 * in the elided lines). */
735 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
736     int work_done = false;
737     bool recovery_done = false;
738     pa_usec_t max_sleep_usec = 0, process_usec = 0;
739     size_t left_to_record;
743     pa_source_assert_ref(u->source);
746         hw_sleep_time(u, &max_sleep_usec, &process_usec);
752         bool after_avail = true;
754         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
756             recovery_done = true;
757             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) >= 0)
763         n_bytes = (size_t) n * u->frame_size;
764         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
/* Too early to bother: enough headroom remains for the next wakeup. */
769             pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
772         if (PA_UNLIKELY(n_bytes <= 0)) {
/* Spurious POLLIN wakeup — report it as a likely ALSA driver bug. */
776                 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
777                 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read.\n"
778                          "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
779                          "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
789             pa_log_debug("Not filling up, because already too many iterations.");
799             snd_pcm_sframes_t frames;
/* (size_t) -1 requests the mempool's maximum block size. */
802             chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
804             frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
806             if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
807                 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
809             /* pa_log_debug("%lu frames to read", (unsigned long) n); */
811             p = pa_memblock_acquire(chunk.memblock);
812             frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
813             pa_memblock_release(chunk.memblock);
815             if (PA_UNLIKELY(frames < 0)) {
816                 pa_memblock_unref(chunk.memblock);
818                 if (!after_avail && (int) frames == -EAGAIN)
821                 recovery_done = true;
822                 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
831             if (!after_avail && frames == 0) {
832                 pa_memblock_unref(chunk.memblock);
836             pa_assert(frames > 0);
840             chunk.length = (size_t) frames * u->frame_size;
842             pa_source_post(u->source, &chunk);
843             pa_memblock_unref(chunk.memblock);
847             u->read_count += frames * u->frame_size;
849             /* pa_log_debug("read %lu frames", (unsigned long) frames); */
851             if ((size_t) frames * u->frame_size >= n_bytes)
854             n_bytes -= (size_t) frames * u->frame_size;
859     *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
860     process_usec = u->tsched_watermark_usec;
862     if (*sleep_usec > process_usec)
863         *sleep_usec -= process_usec;
867     /* If the PCM was recovered, it may need restarting. Reduce the sleep time
868      * to 0 to ensure immediate restart. */
873     return work_done ? 1 : 0;
/* Feed a fresh (system time, stream position) sample into the time smoother.
 * Uses the PCM status htimestamp when the driver provides one, otherwise
 * falls back to pa_rtclock_now(). Updates are rate-limited by
 * smoother_interval, which backs off exponentially up to
 * SMOOTHER_MAX_INTERVAL once the estimate has settled. */
876 static void update_smoother(struct userdata *u) {
877     snd_pcm_sframes_t delay = 0;
880     pa_usec_t now1 = 0, now2;
881     snd_pcm_status_t *status;
882     snd_htimestamp_t htstamp = { 0, 0 };
884     snd_pcm_status_alloca(&status);
887     pa_assert(u->pcm_handle);
889     /* Let's update the time smoother */
891     if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->source->sample_spec, true)) < 0)) {
892         pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
896     snd_pcm_status_get_htstamp(status, &htstamp);
897     now1 = pa_timespec_load(&htstamp);
899     /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
901         now1 = pa_rtclock_now();
903     /* check if the time since the last update is bigger than the interval */
904     if (u->last_smoother_update > 0)
905         if (u->last_smoother_update + u->smoother_interval > now1)
/* For a capture stream the device is ahead of us: position = bytes already
 * read plus what is still sitting in the hardware buffer (delay). */
908     position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
909     now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
911     pa_smoother_put(u->smoother, now1, now2);
913     u->last_smoother_update = now1;
914     /* exponentially increase the update interval up to the MAX limit */
915     u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
/* Current capture latency estimate: smoothed device position minus what we
 * have already read. May be negative (hence int64_t), e.g. right after a
 * smoother reset. */
918 static int64_t source_get_latency(struct userdata *u) {
920     pa_usec_t now1, now2;
924     now1 = pa_rtclock_now();
925     now2 = pa_smoother_get(u->smoother, now1);
927     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
/* (Re)register the PCM's poll descriptors with the rtpoll loop, replacing any
 * previous registration. Fails if pa_alsa_build_pollfd() does. */
932 static int build_pollfd(struct userdata *u) {
934     pa_assert(u->pcm_handle);
936     if (u->alsa_rtpoll_item)
937         pa_rtpoll_item_free(u->alsa_rtpoll_item);
939     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
945 /* Called from IO context */
/* Suspend the source: close the PCM device (unless it was already invalidated
 * by a device failure) and log the transition. */
946 static void suspend(struct userdata *u) {
949     /* PCM may have been invalidated due to device failure.
950      * In that case, there is nothing to do. */
954     /* Close PCM device */
957     pa_log_info("Device suspended...");
960 /* Called from IO context */
/* Recompute software parameters from the currently requested latency: derive
 * hwbuf_unused (the part of the hardware buffer we deliberately leave empty
 * to honor a low-latency request), refresh the tsched margins, and push the
 * resulting avail_min to ALSA via pa_alsa_set_sw_params(). */
961 static int update_sw_params(struct userdata *u) {
962     snd_pcm_uframes_t avail_min;
967     /* Use the full buffer if no one asked us for anything specific */
973     if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
976         pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
978         b = pa_usec_to_bytes(latency, &u->source->sample_spec);
980         /* We need at least one sample in our buffer */
982         if (PA_UNLIKELY(b < u->frame_size))
985         u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
988         fix_min_sleep_wakeup(u);
989         fix_tsched_watermark(u);
992     pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
997         pa_usec_t sleep_usec, process_usec;
999         hw_sleep_time(u, &sleep_usec, &process_usec);
1000         avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
1003     pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
1005     if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
1006         pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
1013 /* Called from IO Context on unsuspend */
/* The sample spec (and hence frame size) changed across a suspend/resume
 * cycle: re-derive all byte-sized buffer quantities from the initial module
 * configuration and the new spec. */
1014 static void update_size(struct userdata *u, pa_sample_spec *ss) {
1018     u->frame_size = pa_frame_size(ss);
1019     u->frames_per_block = pa_mempool_block_size_max(u->core->mempool) / u->frame_size;
1021     /* use initial values including module arguments */
1022     u->fragment_size = u->initial_info.fragment_size;
1023     u->hwbuf_size = u->initial_info.nfrags * u->fragment_size;
1024     u->tsched_size = u->initial_info.tsched_size;
1025     u->tsched_watermark = u->initial_info.tsched_watermark;
1027     u->tsched_watermark_ref = u->tsched_watermark;
1029     pa_log_info("Updated frame_size %zu, frames_per_block %lu, fragment_size %zu, hwbuf_size %zu, tsched(size %zu, watermark %zu)",
1030                 u->frame_size, (unsigned long) u->frames_per_block, u->fragment_size, u->hwbuf_size, u->tsched_size, u->tsched_watermark);
1033 /* Called from IO context */
/* Reopen and reconfigure the PCM after a suspend (or as the last-resort path
 * of try_recover(), with recovering=true). Retries snd_pcm_open() because the
 * /dev/snd nodes may not be back yet after a system resume; then restores hw
 * params and verifies that access mode, sample spec and buffer geometry match
 * what we had before. On any mismatch the resume fails (cleanup closes the
 * PCM, see the tail of the function). */
1034 static int unsuspend(struct userdata *u, bool recovering) {
1038     snd_pcm_uframes_t period_frames, buffer_frames;
1039     snd_pcm_uframes_t tsched_frames = 0;
1040     bool frame_size_changed = false;
1043     pa_assert(!u->pcm_handle);
1045     pa_log_info("Trying resume...");
1048      * On some machines, during the system suspend and resume, the thread_func could receive
1049      * POLLERR events before the dev nodes in /dev/snd/ are accessible, and thread_func calls
1050      * the unsuspend() to try to recover the PCM, this will make the snd_pcm_open() fail, here
1051      * we add msleep and retry to make sure those nodes are accessible.
1053     for (i = 0; i < 4; i++) {
1054         if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
1056                             SND_PCM_NO_AUTO_RESAMPLE|
1057                             SND_PCM_NO_AUTO_CHANNELS|
1058                             SND_PCM_NO_AUTO_FORMAT)) < 0 && recovering)
1065         pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
/* Frame size may differ if the sample spec was changed while suspended. */
1069     if (pa_frame_size(&u->source->sample_spec) != u->frame_size) {
1070         update_size(u, &u->source->sample_spec);
1071         tsched_frames = u->tsched_size / u->frame_size;
1072         frame_size_changed = true;
1075     ss = u->source->sample_spec;
1076     period_frames = u->fragment_size / u->frame_size;
1077     buffer_frames = u->hwbuf_size / u->frame_size;
1081     if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_frames, &buffer_frames, tsched_frames, &b, &d, true)) < 0) {
1082         pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
/* b/d report the access/scheduling mode actually granted; it must match the
 * mode we were using before the suspend. */
1086     if (b != u->use_mmap || d != u->use_tsched) {
1087         pa_log_warn("Resume failed, couldn't get original access mode.");
1091     if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
1092         pa_log_warn("Resume failed, couldn't restore original sample settings.");
1096     if (frame_size_changed) {
1097         u->fragment_size = (size_t)(period_frames * u->frame_size);
1098         u->hwbuf_size = (size_t)(buffer_frames * u->frame_size);
1099         pa_proplist_setf(u->source->proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%zu", u->hwbuf_size);
1100         pa_proplist_setf(u->source->proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%zu", u->fragment_size);
1102     } else if (period_frames * u->frame_size != u->fragment_size ||
1103                buffer_frames * u->frame_size != u->hwbuf_size) {
1104         pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %zu/%zu, New %lu/%lu)",
1105                     u->hwbuf_size, u->fragment_size,
1106                     (unsigned long) buffer_frames * u->frame_size, (unsigned long) period_frames * u->frame_size);
1110     if (update_sw_params(u) < 0)
1113     if (build_pollfd(u) < 0)
1116     /* FIXME: We need to reload the volume somehow */
1120     /* reset the watermark to the value defined when source was created */
1121     if (u->use_tsched && !recovering)
1122         reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, true);
1124     pa_log_info("Resumed successfully...");
/* Failure path: make sure a half-opened PCM handle is not leaked. */
1129     if (u->pcm_handle) {
1130         snd_pcm_close(u->pcm_handle);
1131         u->pcm_handle = NULL;
1137 /* Called from the IO thread or the main thread depending on whether deferred
1138  * volume is enabled or not (with deferred volume all mixer handling is done
1139  * from the IO thread).
1141  * Sets the mixer settings to match the current source and port state (the port
1142  * is given as an argument, because active_port may still point to the old
1143  * port, if we're switching ports). */
1144 static void sync_mixer(struct userdata *u, pa_device_port *port) {
1145     pa_alsa_setting *setting = NULL;
1152     /* port may be NULL, because if we use a synthesized mixer path, then the
1153      * source has no ports. */
1154     if (port && !u->ucm_context) {
1155         pa_alsa_port_data *data;
1157         data = PA_DEVICE_PORT_DATA(port);
1158         setting = data->setting;
1161     pa_alsa_path_select(u->mixer_path, setting, u->mixer_handle, u->source->muted);
/* Re-apply mute and volume through whichever callbacks this source has;
 * deferred-volume sources use write_volume, others set_volume. */
1163     if (u->source->set_mute)
1164         u->source->set_mute(u->source);
1165     if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1166         if (u->source->write_volume)
1167             u->source->write_volume(u->source);
1169         if (u->source->set_volume)
1170             u->source->set_volume(u->source);
1174 /* Called from IO context */
/* IO-thread message dispatcher for this source. Handles the generic
 * GET_LATENCY query and the module-private SYNC_MIXER request; everything
 * else is forwarded to the default pa_source_process_msg(). */
1175 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1176     struct userdata *u = PA_SOURCE(o)->userdata;
1180         case PA_SOURCE_MESSAGE_GET_LATENCY: {
1184             r = source_get_latency(u);
1186             *((int64_t*) data) = r;
1191         case SOURCE_MESSAGE_SYNC_MIXER: {
1192             pa_device_port *port = data;
1194             sync_mixer(u, port);
1199     return pa_source_process_msg(o, code, data, offset, chunk);
1202 /* Called from main context */
/* Main-thread state-change callback: re-sync the mixer when our session
 * becomes active again (non-deferred-volume case only), release the device
 * reservation when suspending, and re-acquire it when resuming (failure to
 * re-acquire aborts the resume with PA_ERR_BUSY). */
1203 static int source_set_state_in_main_thread_cb(pa_source *s, pa_source_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
1204     pa_source_state_t old_state;
1207     pa_source_assert_ref(s);
1208     pa_assert_se(u = s->userdata);
1210     /* When our session becomes active, we need to sync the mixer, because
1211      * another user may have changed the mixer settings.
1213      * If deferred volume is enabled, the syncing is done in the
1214      * set_state_in_io_thread() callback instead. */
1215     if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME)
1216         && (s->suspend_cause & PA_SUSPEND_SESSION)
1217         && !(new_suspend_cause & PA_SUSPEND_SESSION))
1218         sync_mixer(u, s->active_port);
1220     old_state = u->source->state;
1222     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1224     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1225         if (reserve_init(u, u->device_name) < 0)
1226             return -PA_ERR_BUSY;
1231 /* Called from the IO thread. */
/* IO-thread state-change callback: mirror of the main-thread variant for the
 * deferred-volume mixer sync, then perform the actual suspend/resume work —
 * building the pollfd on first start (INIT), unsuspending when leaving
 * SUSPENDED, and suspending when entering it. */
1232 static int source_set_state_in_io_thread_cb(pa_source *s, pa_source_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
1236     pa_assert_se(u = s->userdata);
1238     /* When our session becomes active, we need to sync the mixer, because
1239      * another user may have changed the mixer settings.
1241      * If deferred volume is disabled, the syncing is done in the
1242      * set_state_in_main_thread() callback instead. */
1243     if ((s->flags & PA_SOURCE_DEFERRED_VOLUME)
1244         && (s->suspend_cause & PA_SUSPEND_SESSION)
1245         && !(new_suspend_cause & PA_SUSPEND_SESSION))
1246         sync_mixer(u, s->active_port);
1248     /* It may be that only the suspend cause is changing, in which case there's
1249      * nothing more to do. */
1250     if (new_state == s->thread_info.state)
1253     switch (new_state) {
1255         case PA_SOURCE_SUSPENDED: {
1256             pa_assert(PA_SOURCE_IS_OPENED(s->thread_info.state));
1263         case PA_SOURCE_IDLE:
1264         case PA_SOURCE_RUNNING: {
1267             if (s->thread_info.state == PA_SOURCE_INIT) {
1268                 if (build_pollfd(u) < 0)
1269                     /* FIXME: This will cause an assertion failure, because
1270                      * with the current design pa_source_put() is not allowed
1271                      * to fail and pa_source_put() has no fallback code that
1272                      * would start the source suspended if opening the device
1277             if (s->thread_info.state == PA_SOURCE_SUSPENDED) {
1278                 if ((r = unsuspend(u, false)) < 0)
1285         case PA_SOURCE_UNLINKED:
1286         case PA_SOURCE_INIT:
1287         case PA_SOURCE_INVALID_STATE:
/* Mixer-element callback used when deferred volume is off (runs in the main
 * thread via the fdlist): on external value changes, re-read hardware volume
 * and mute. Ignores events while unlinked or session-suspended, and element
 * removal events. */
1294 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1295     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1298     pa_assert(u->mixer_handle);
1300     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1303     if (!PA_SOURCE_IS_LINKED(u->source->state))
1306     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1309     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1310         pa_source_get_volume(u->source, true);
1311         pa_source_get_mute(u->source, true);
/* Mixer-element callback for the deferred-volume case (IO thread): funnel
 * external value changes through pa_source_update_volume_and_mute(). Same
 * removal/session-suspend filtering as ctl_mixer_callback(). */
1317 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1318     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1321     pa_assert(u->mixer_handle);
1323     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1326     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1329     if (mask & SND_CTL_EVENT_MASK_VALUE)
1330         pa_source_update_volume_and_mute(u->source);
/* get_volume callback: read the hardware volume via the mixer path, scale it
 * by base_volume into PulseAudio's range, and adopt it as real_volume if it
 * changed. On a dB-capable path the software volume is reset, since the
 * hardware change supersedes it. */
1335 static void source_get_volume_cb(pa_source *s) {
1336     struct userdata *u = s->userdata;
1338     char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1341     pa_assert(u->mixer_path);
1342     pa_assert(u->mixer_handle);
1344     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1347     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1348     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1350     pa_log_debug("Read hardware volume: %s",
1351                  pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));
1353     if (pa_cvolume_equal(&u->hardware_volume, &r))
1356     s->real_volume = u->hardware_volume = r;
1358     /* Hmm, so the hardware volume changed, let's reset our software volume */
1359     if (u->mixer_path->has_dB)
1360         pa_source_set_soft_volume(s, NULL);
/* pa_source set_volume() hook: push s->real_volume to the hardware mixer as
 * closely as possible, then compute the residual software volume that makes
 * hardware * software == requested. With PA_SOURCE_DEFERRED_VOLUME the
 * actual hardware write normally happens later in the IO thread
 * (source_write_volume_cb); the only exception is a UCM port change, where
 * the value must hit the hardware immediately or it would be clobbered by
 * the get_volume triggered from _disdev/_enadev. */
1363 static void source_set_volume_cb(pa_source *s) {
1364 struct userdata *u = s->userdata;
1366 char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1367 bool deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1368 bool write_to_hw = !deferred_volume;
1371 pa_assert(u->mixer_path);
1372 pa_assert(u->mixer_handle);
1374 /* Shift up by the base volume */
1375 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1377 /* If the set_volume() is called because of ucm active_port changing, the
1378 * volume should be written to hw immediately, otherwise this volume will be
1379 * overridden by calling get_volume_cb() which is called by
1380 * _disdev/_enadev() -> io_mixer_callback() */
1381 if (u->ucm_context && s->port_changing)
1384 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, write_to_hw) < 0)
1387 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1388 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
     /* Remember what actually landed in (or was scheduled for) the hardware. */
1390 u->hardware_volume = r;
1392 if (u->mixer_path->has_dB) {
1393 pa_cvolume new_soft_volume;
1394 bool accurate_enough;
1396 /* Match exactly what the user requested by software */
1397 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1399 /* If the adjustment to do in software is only minimal we
1400 * can skip it. That saves us CPU at the expense of a bit of
     * accuracy: the correction is dropped when it stays within
     * VOLUME_ACCURACY of unity gain. */
1403 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1404 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1406 pa_log_debug("Requested volume: %s",
1407 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
1408 pa_log_debug("Got hardware volume: %s",
1409 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
1410 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1411 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
1412 pa_yes_no(accurate_enough));
1414 if (!accurate_enough)
1415 s->soft_volume = new_soft_volume;
     /* No-dB path: no software correction is possible. */
1418 pa_log_debug("Wrote hardware volume: %s",
1419 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));
1421 /* We can't match exactly what the user requested, hence let's
1422 * at least tell the user about it */
/* Deferred-volume write hook, runs in the IO thread: write the pending
 * thread_info.current_hw_volume to the hardware mixer (scaled up by
 * base_volume first). Afterwards, compare what the hardware actually took
 * against the request and log a debug message if the deviation exceeds
 * VOLUME_ACCURACY. Only valid when PA_SOURCE_DEFERRED_VOLUME is set. */
1428 static void source_write_volume_cb(pa_source *s) {
1429 struct userdata *u = s->userdata;
1430 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1433 pa_assert(u->mixer_path);
1434 pa_assert(u->mixer_handle);
1435 pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1437 /* Shift up by the base volume */
1438 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1440 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
1441 pa_log_error("Writing HW volume failed");
1444 bool accurate_enough;
1446 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1447 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
     /* Ratio of written-back vs requested volume; unity means a perfect match. */
1449 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1451 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1452 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1454 if (!accurate_enough) {
1455 char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1457 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1458 pa_cvolume_snprint_verbose(volume_buf[0],
1459 sizeof(volume_buf[0]),
1460 &s->thread_info.current_hw_volume,
1463 pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
/* pa_source get_mute() hook: read the hardware mute switch of the active
 * mixer path into *mute. Returns negative on failure (read error path
 * elided in this view), 0 on success. */
1468 static int source_get_mute_cb(pa_source *s, bool *mute) {
1469 struct userdata *u = s->userdata;
1472 pa_assert(u->mixer_path);
1473 pa_assert(u->mixer_handle);
1475 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, mute) < 0)
/* pa_source set_mute() hook: propagate s->muted to the hardware mute
 * switch of the active mixer path. */
1481 static void source_set_mute_cb(pa_source *s) {
1482 struct userdata *u = s->userdata;
1485 pa_assert(u->mixer_path);
1486 pa_assert(u->mixer_handle);
1488 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
/* Install or clear the source's volume/mute callbacks according to the
 * capabilities of the currently selected mixer path:
 *  - no path or no volume control -> software volume (callbacks cleared);
 *  - dB-capable path + deferred volume -> IO-thread write callback enabled;
 *  - dB-capable path -> decibel volume with base_volume derived from max_dB;
 *  - non-dB path -> linear steps over the raw mixer range.
 * Mute handling is configured analogously from has_mute. Called whenever
 * the mixer path changes (port switch, initial setup). */
1491 static void mixer_volume_init(struct userdata *u) {
1494 if (!u->mixer_path || !u->mixer_path->has_volume) {
1495 pa_source_set_write_volume_callback(u->source, NULL);
1496 pa_source_set_get_volume_callback(u->source, NULL);
1497 pa_source_set_set_volume_callback(u->source, NULL);
1499 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1501 pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1502 pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1504 if (u->mixer_path->has_dB && u->deferred_volume) {
1505 pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1506 pa_log_info("Successfully enabled deferred volume.");
1508 pa_source_set_write_volume_callback(u->source, NULL);
1510 if (u->mixer_path->has_dB) {
1511 pa_source_enable_decibel_volume(u->source, true);
1512 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
     /* Map hardware 0 dB to PA_VOLUME_NORM by treating max_dB as the ceiling. */
1514 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1515 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1517 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1519 pa_source_enable_decibel_volume(u->source, false);
1520 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1522 u->source->base_volume = PA_VOLUME_NORM;
1523 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1526 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1529 if (!u->mixer_path || !u->mixer_path->has_mute) {
1530 pa_source_set_get_mute_callback(u->source, NULL);
1531 pa_source_set_set_mute_callback(u->source, NULL);
1532 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1534 pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1535 pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1536 pa_log_info("Using hardware mute control.");
/* pa_source set_port() hook for UCM-managed sources: switch the mixer path
 * to the new port's path, re-derive volume/mute callbacks, and — when
 * deferred volume is active — synchronously ask the IO thread to re-sync
 * the mixer before running the UCM device enable/disable sequences. */
1540 static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
1541 struct userdata *u = s->userdata;
1542 pa_alsa_ucm_port_data *data;
1546 pa_assert(u->ucm_context);
1548 data = PA_DEVICE_PORT_DATA(p);
1549 u->mixer_path = data->path;
1550 mixer_volume_init(u);
1552 if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
1553 pa_asyncmsgq_send(u->source->asyncmsgq, PA_MSGOBJECT(u->source), SOURCE_MESSAGE_SYNC_MIXER, p, 0, NULL);
1557 return pa_alsa_ucm_set_port(u->ucm_context, p, false);
/* pa_source set_port() hook for non-UCM sources: adopt the port's mixer
 * path (must exist — asserted), reinitialize volume/mute callbacks and,
 * under deferred volume, tell the IO thread to re-sync the mixer.
 * NOTE(review): the path selection and return statement of this function
 * are elided in this view. */
1560 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1561 struct userdata *u = s->userdata;
1562 pa_alsa_port_data *data;
1566 pa_assert(u->mixer_handle);
1567 pa_assert(!u->ucm_context);
1569 data = PA_DEVICE_PORT_DATA(p);
1570 pa_assert_se(u->mixer_path = data->path);
1571 mixer_volume_init(u);
1573 if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
1574 pa_asyncmsgq_send(u->source->asyncmsgq, PA_MSGOBJECT(u->source), SOURCE_MESSAGE_SYNC_MIXER, p, 0, NULL);
/* pa_source update_requested_latency() hook: re-derive software parameters
 * (watermarks, wakeup points) after a client changed its latency request.
 * Only meaningful with timer-based scheduling, hence the assert. */
1581 static void source_update_requested_latency_cb(pa_source *s) {
1582 struct userdata *u = s->userdata;
1584 pa_assert(u->use_tsched); /* only when timer scheduling is used
1585 * we can dynamically adjust the
     * latency */
1591 update_sw_params(u);
/* pa_source reconfigure() hook: apply a requested sample format and rate if
 * the device verified them at probe time (u->supported_formats /
 * u->supported_rates); otherwise fall back to the corresponding value from
 * u->verified_sample_spec. The 'passthrough' flag is accepted for interface
 * compatibility; this capture path does not act on it here.
 * Fix: corrected the typo "verfied" -> "verified" in the rate fallback log
 * message, matching the wording of the format fallback message above. */
1594 static void source_reconfigure_cb(pa_source *s, pa_sample_spec *spec, bool passthrough) {
1595 struct userdata *u = s->userdata;
1597 bool format_supported = false;
1598 bool rate_supported = false;
1602 for (i = 0; u->supported_formats[i] != PA_SAMPLE_MAX; i++) {
1603 if (u->supported_formats[i] == spec->format) {
1604 pa_source_set_sample_format(u->source, spec->format);
1605 format_supported = true;
1610 if (!format_supported) {
1611 pa_log_info("Source does not support sample format of %s, set it to a verified value",
1612 pa_sample_format_to_string(spec->format));
1613 pa_source_set_sample_format(u->source, u->verified_sample_spec.format);
1616 for (i = 0; u->supported_rates[i]; i++) {
1617 if (u->supported_rates[i] == spec->rate) {
1618 pa_source_set_sample_rate(u->source, spec->rate);
1619 rate_supported = true;
1624 if (!rate_supported) {
1625 pa_log_info("Source does not support sample rate of %u, set it to a verified value", spec->rate);
1626 pa_source_set_sample_rate(u->source, u->verified_sample_spec.rate);
/* IO thread main loop. Per iteration: (1) if the source is opened, start
 * capture if needed and drain the ALSA buffer via mmap_read()/unix_read(),
 * computing the next wakeup from the sound-card clock translated through
 * the smoother; (2) fold in any pending deferred-volume deadline; (3) sleep
 * in pa_rtpoll_run(); (4) on wakeup, feed poll revents back to ALSA and
 * recover from xruns/errors, restarting the stream when required. On an
 * irregular exit it asks the core to unload the module, then waits for
 * PA_MESSAGE_SHUTDOWN. NOTE(review): loop braces, error-goto paths and some
 * declarations are elided in this view. */
1630 static void thread_func(void *userdata) {
1631 struct userdata *u = userdata;
1632 unsigned short revents = 0;
1636 pa_log_debug("Thread starting up");
1638 if (u->core->realtime_scheduling)
1639 pa_thread_make_realtime(u->core->realtime_priority);
1641 pa_thread_mq_install(&u->thread_mq);
1645 pa_usec_t rtpoll_sleep = 0, real_sleep;
1648 pa_log_debug("Loop");
1651 /* Read some data and pass it to the sources */
1652 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1654 pa_usec_t sleep_usec = 0;
1655 bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1658 pa_log_info("Starting capture.");
1659 snd_pcm_start(u->pcm_handle);
1661 pa_smoother_resume(u->smoother, pa_rtclock_now(), true);
1667 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1669 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1674 /* pa_log_debug("work_done = %i", work_done); */
1679 if (u->use_tsched) {
1682 /* OK, the capture buffer is now empty, let's
1683 * calculate when to wake up next */
1685 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1687 /* Convert from the sound card time domain to the
1688 * system time domain */
1689 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1691 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1693 /* We don't trust the conversion, so we wake up whatever comes first */
1694 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
     /* Deferred volume changes may need to fire before the next read wakeup. */
1698 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1699 pa_usec_t volume_sleep;
1700 pa_source_volume_change_apply(u->source, &volume_sleep);
1701 if (volume_sleep > 0) {
1702 if (rtpoll_sleep > 0)
1703 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1705 rtpoll_sleep = volume_sleep;
1709 if (rtpoll_sleep > 0) {
1710 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1711 real_sleep = pa_rtclock_now();
1714 pa_rtpoll_set_timer_disabled(u->rtpoll);
1716 /* Hmm, nothing to do. Let's sleep */
1717 if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
     /* Measure how much longer than requested we actually slept. */
1720 if (rtpoll_sleep > 0) {
1721 real_sleep = pa_rtclock_now() - real_sleep;
1723 pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
1724 (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
1725 (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
1727 if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
1728 pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
1729 (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
1730 (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
1733 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
1734 pa_source_volume_change_apply(u->source, NULL);
1739 /* Tell ALSA about this and process its response */
1740 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1741 struct pollfd *pollfd;
1745 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1747 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1748 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
     /* Anything besides POLLIN (POLLERR etc.) means an xrun or device error. */
1752 if (revents & ~POLLIN) {
1753 if ((err = pa_alsa_recover_from_poll(u->pcm_handle, revents)) < 0)
1756 /* Stream needs to be restarted */
1759 if (unsuspend(u, true) < 0)
1765 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1766 pa_log_debug("Wakeup from ALSA!");
1773 /* If this was no regular exit from the loop we have to continue
1774 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1775 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1776 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1779 pa_log_debug("Thread shutting down");
/* Choose the new source's name. Precedence: explicit "source_name" modarg
 * (registration failure is then fatal) > a name derived from the device id
 * or device name, optionally suffixed with the mapping name, in the form
 * "alsa_input.<dev>[.<mapping>]" (registration failure tolerated, unless a
 * "name" modarg was given). NOTE(review): the early return after the
 * explicit-name branch and the pa_xfree(t) are elided in this view. */
1782 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1788 pa_assert(device_name);
1790 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1791 pa_source_new_data_set_name(data, n);
1792 data->namereg_fail = true;
1796 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1797 data->namereg_fail = true;
1799 n = device_id ? device_id : device_name;
1800 data->namereg_fail = false;
1804 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1806 t = pa_sprintf_malloc("alsa_input.%s", n);
1808 pa_source_new_data_set_name(data, t);
/* Locate and open a mixer for the source. If an explicit mixer element was
 * requested, synthesize and probe a single path for it; otherwise fall back
 * to the mapping's probed input path set. The mixer device can be overridden
 * per-mapping via the "alsa.mixer_device" property. On any failure the
 * partially-initialized mixer state is torn down again (tail of the
 * function). NOTE(review): the early-return/goto lines between the steps
 * are elided in this view. */
1812 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
1815 if (!mapping && !element)
1818 if (!element && mapping && pa_alsa_path_set_is_empty(mapping->input_path_set))
1821 u->mixers = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func,
1822 NULL, (pa_free_cb_t) pa_alsa_mixer_free);
1824 mdev = mapping ? pa_proplist_gets(mapping->proplist, "alsa.mixer_device") : NULL;
1826 u->mixer_handle = pa_alsa_open_mixer_by_name(u->mixers, mdev, false);
1828 u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->mixers, u->pcm_handle, false);
1830 if (!u->mixer_handle) {
1831 pa_log_info("Failed to find a working mixer device.");
     /* Explicit element requested: build a one-element path and probe it. */
1837 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1840 if (pa_alsa_path_probe(u->mixer_path, NULL, u->mixer_handle, ignore_dB) < 0)
1843 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1844 pa_alsa_path_dump(u->mixer_path);
1846 u->mixer_path_set = mapping->input_path_set;
     /* Failure cleanup: release any path/handle/mixer map acquired above. */
1853 if (u->mixer_path) {
1854 pa_alsa_path_free(u->mixer_path);
1855 u->mixer_path = NULL;
1858 u->mixer_handle = NULL;
1859 pa_hashmap_free(u->mixers);
/* Finish mixer initialization after the source object exists: run the UCM
 * port enable sequence for the active port, select the mixer path matching
 * the active port (or the only available path), install the volume/mute
 * callbacks via mixer_volume_init(), and register mixer event callbacks —
 * rtpoll-based in the IO thread for deferred volume, fdlist-based in the
 * main loop otherwise. Returns negative on failure (error returns elided
 * in this view), 0 on success. */
1863 static int setup_mixer(struct userdata *u, bool ignore_dB) {
1864 bool need_mixer_callback = false;
1868 /* This code is before the u->mixer_handle check, because if the UCM
1869 * configuration doesn't specify volume or mute controls, u->mixer_handle
1870 * will be NULL, but the UCM device enable sequence will still need to be
     * executed. */
1872 if (u->source->active_port && u->ucm_context) {
1873 if (pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, false) < 0)
1877 if (!u->mixer_handle)
1880 if (u->source->active_port) {
1881 if (!u->ucm_context) {
1882 pa_alsa_port_data *data;
1884 /* We have a list of supported paths, so let's activate the
1885 * one that has been chosen as active */
1887 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1888 u->mixer_path = data->path;
1890 pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);
1892 pa_alsa_ucm_port_data *data;
1894 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1896 /* Now activate volume controls, if any */
1898 u->mixer_path = data->path;
1899 pa_alsa_path_select(u->mixer_path, NULL, u->mixer_handle, u->source->muted);
     /* No active port: fall back to the first (possibly only) probed path. */
1904 if (!u->mixer_path && u->mixer_path_set)
1905 u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1907 if (u->mixer_path) {
1908 /* Hmm, we have only a single path, then let's activate it */
1910 pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
1915 mixer_volume_init(u);
1917 /* Will we need to register callbacks? */
1918 if (u->mixer_path_set && u->mixer_path_set->paths) {
1922 PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1923 if (p->has_volume || p->has_mute)
1924 need_mixer_callback = true;
1927 else if (u->mixer_path)
1928 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1930 if (need_mixer_callback) {
1931 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1932 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1933 u->mixer_pd = pa_alsa_mixer_pdata_new();
1934 mixer_callback = io_mixer_callback;
1936 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1937 pa_log("Failed to initialize file descriptor monitoring");
1941 u->mixer_fdl = pa_alsa_fdlist_new();
1942 mixer_callback = ctl_mixer_callback;
1944 if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1945 pa_log("Failed to initialize file descriptor monitoring");
1950 if (u->mixer_path_set)
1951 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1953 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
/* Module entry point: create a new ALSA capture source.
 *
 * Steps: parse sample spec / channel map / buffer metrics / boolean modargs;
 * allocate and prime the userdata; reserve the device; open the PCM (by UCM
 * mapping, by device_id with auto profile, or by plain device string); probe
 * supported formats/rates; build the pa_source_new_data (name, proplist,
 * ports); create the pa_source and wire up all callbacks; compute buffer
 * geometry and latency; set up the mixer; spawn the IO thread; apply/read
 * initial volume and mute; finally pa_source_put(). Returns the new source,
 * or NULL on failure (error 'goto fail' paths are elided in this view).
 * Ownership: on success the returned source's userdata is owned by the
 * module and released through userdata_free(). */
1959 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1961 struct userdata *u = NULL;
1962 const char *dev_id = NULL, *key, *mod_name;
1964 char *thread_name = NULL;
1965 uint32_t alternate_sample_rate;
1967 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1968 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1970 bool use_mmap = true;
1971 bool use_tsched = true;
1972 bool ignore_dB = false;
1973 bool namereg_fail = false;
1974 bool deferred_volume = false;
1975 bool fixed_latency_range = false;
1978 bool avoid_resampling;
1979 pa_source_new_data data;
1982 pa_alsa_profile_set *profile_set = NULL;
1988 ss = m->core->default_sample_spec;
1989 map = m->core->default_channel_map;
1990 avoid_resampling = m->core->avoid_resampling;
1992 /* Pick sample spec overrides from the mapping, if any */
1994 if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
1995 ss.format = mapping->sample_spec.format;
1996 if (mapping->sample_spec.rate != 0)
1997 ss.rate = mapping->sample_spec.rate;
1998 if (mapping->sample_spec.channels != 0) {
1999 ss.channels = mapping->sample_spec.channels;
2000 if (pa_channel_map_valid(&mapping->channel_map))
2001 pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
2005 /* Override with modargs if provided */
2006 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
2007 pa_log("Failed to parse sample specification and channel map");
2011 alternate_sample_rate = m->core->alternate_sample_rate;
2012 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
2013 pa_log("Failed to parse alternate sample rate");
     /* Buffer metrics: defaults from the core, overridable by modargs. */
2017 frame_size = pa_frame_size(&ss);
2019 nfrags = m->core->default_n_fragments;
2020 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
2022 frag_size = (uint32_t) frame_size;
2023 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2024 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
2026 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2027 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2028 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2029 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2030 pa_log("Failed to parse buffer metrics");
2034 buffer_size = nfrags * frag_size;
2036 period_frames = frag_size/frame_size;
2037 buffer_frames = buffer_size/frame_size;
2038 tsched_frames = tsched_size/frame_size;
2040 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2041 pa_log("Failed to parse mmap argument.");
2045 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2046 pa_log("Failed to parse tsched argument.");
2050 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2051 pa_log("Failed to parse ignore_dB argument.");
2055 deferred_volume = m->core->deferred_volume;
2056 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2057 pa_log("Failed to parse deferred_volume argument.");
2061 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2062 pa_log("Failed to parse fixed_latency_range argument.");
2066 use_tsched = pa_alsa_may_tsched(use_tsched);
     /* Allocate and prime the per-source userdata. */
2068 u = pa_xnew0(struct userdata, 1);
2071 u->use_mmap = use_mmap;
2072 u->use_tsched = use_tsched;
2073 u->tsched_size = tsched_size;
2074 u->initial_info.nfrags = (size_t) nfrags;
2075 u->initial_info.fragment_size = (size_t) frag_size;
2076 u->initial_info.tsched_size = (size_t) tsched_size;
2077 u->initial_info.tsched_watermark = (size_t) tsched_watermark;
2078 u->deferred_volume = deferred_volume;
2079 u->fixed_latency_range = fixed_latency_range;
2081 u->rtpoll = pa_rtpoll_new();
2083 if (pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll) < 0) {
2084 pa_log("pa_thread_mq_init() failed.");
2088 u->smoother = pa_smoother_new(
2089 SMOOTHER_ADJUST_USEC,
2090 SMOOTHER_WINDOW_USEC,
2096 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2099 if (mapping && mapping->ucm_context.ucm)
2100 u->ucm_context = &mapping->ucm_context;
2102 dev_id = pa_modargs_get_value(
2104 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2106 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
2108 if (reserve_init(u, dev_id) < 0)
2111 if (reserve_monitor_init(u, dev_id) < 0)
2117 /* Force ALSA to reread its configuration if module-alsa-card didn't
2118 * do it for us. This matters if our device was hot-plugged after ALSA
2119 * has already read its configuration - see
2120 * https://bugs.freedesktop.org/show_bug.cgi?id=54029
     */
2124 snd_config_update_free_global();
     /* Open the PCM device, preferring UCM mapping, then device_id, then
      * a plain device string. */
2128 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2129 pa_log("device_id= not set");
2133 if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
2134 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
2135 pa_log("Failed to enable ucm modifier %s", mod_name);
2137 pa_log_debug("Enabled ucm modifier %s", mod_name);
2140 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2144 SND_PCM_STREAM_CAPTURE,
2145 &period_frames, &buffer_frames, tsched_frames,
2149 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2151 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2154 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2158 SND_PCM_STREAM_CAPTURE,
2159 &period_frames, &buffer_frames, tsched_frames,
2160 &b, &d, profile_set, &mapping)))
2165 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2166 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2169 SND_PCM_STREAM_CAPTURE,
2170 &period_frames, &buffer_frames, tsched_frames,
2175 pa_assert(u->device_name);
2176 pa_log_info("Successfully opened device %s.", u->device_name);
2178 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2179 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
2184 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
     /* Downgrade requested access modes if the device can't support them. */
2186 if (use_mmap && !b) {
2187 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2188 u->use_mmap = use_mmap = false;
2191 if (use_tsched && (!b || !d)) {
2192 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2193 u->use_tsched = use_tsched = false;
2197 pa_log_info("Successfully enabled mmap() mode.");
2199 if (u->use_tsched) {
2200 pa_log_info("Successfully enabled timer-based scheduling mode.");
2201 if (u->fixed_latency_range)
2202 pa_log_info("Disabling latency range changes on overrun");
2205 u->verified_sample_spec = ss;
2207 u->supported_formats = pa_alsa_get_supported_formats(u->pcm_handle, ss.format);
2208 if (!u->supported_formats) {
2209 pa_log_error("Failed to find any supported sample formats.");
2213 u->supported_rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
2214 if (!u->supported_rates) {
2215 pa_log_error("Failed to find any supported sample rates.");
2219 /* ALSA might tweak the sample spec, so recalculate the frame size */
2220 frame_size = pa_frame_size(&ss);
     /* Build the pa_source_new_data: name, sample spec, proplist, ports. */
2222 pa_source_new_data_init(&data);
2223 data.driver = driver;
2226 set_source_name(&data, ma, dev_id, u->device_name, mapping);
2228 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2229 * variable instead of using &data.namereg_fail directly, because
2230 * data.namereg_fail is a bitfield and taking the address of a bitfield
2231 * variable is impossible. */
2232 namereg_fail = data.namereg_fail;
2233 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2234 pa_log("Failed to parse namereg_fail argument.");
2235 pa_source_new_data_done(&data);
2238 data.namereg_fail = namereg_fail;
2240 if (pa_modargs_get_value_boolean(ma, "avoid_resampling", &avoid_resampling) < 0) {
2241 pa_log("Failed to parse avoid_resampling argument.");
2242 pa_source_new_data_done(&data);
2245 pa_source_new_data_set_avoid_resampling(&data, avoid_resampling);
2247 pa_source_new_data_set_sample_spec(&data, &ss);
2248 pa_source_new_data_set_channel_map(&data, &map);
2249 pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
2251 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2252 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2253 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2254 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2255 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2258 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2259 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2262 while ((key = pa_proplist_iterate(mapping->proplist, &state)))
2263 pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
2266 pa_alsa_init_description(data.proplist, card);
2268 if (u->control_device)
2269 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2271 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2272 pa_log("Invalid properties");
2273 pa_source_new_data_done(&data);
2277 if (u->ucm_context) {
2278 pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, false, card, u->pcm_handle, ignore_dB);
2279 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2281 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2282 if (u->mixer_path_set)
2283 pa_alsa_add_ports(&data, u->mixer_path_set, card);
2286 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
2287 volume_is_set = data.volume_is_set;
2288 mute_is_set = data.muted_is_set;
2289 pa_source_new_data_done(&data);
2292 pa_log("Failed to create source object");
2296 if (u->ucm_context) {
2297 pa_device_port *port;
2298 unsigned h_prio = 0;
2299 PA_HASHMAP_FOREACH(port, u->source->ports, state) {
2300 if (!h_prio || port->priority > h_prio)
2301 h_prio = port->priority;
2303 /* ucm ports priority is 100, 200, ..., 900, change it to units digit */
2304 h_prio = h_prio / 100;
2305 u->source->priority += h_prio;
2308 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2309 &u->source->thread_info.volume_change_safety_margin) < 0) {
2310 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2314 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2315 &u->source->thread_info.volume_change_extra_delay) < 0) {
2316 pa_log("Failed to parse deferred_volume_extra_delay parameter");
     /* Wire up the pa_source virtual methods. */
2320 u->source->parent.process_msg = source_process_msg;
2322 u->source->update_requested_latency = source_update_requested_latency_cb;
2323 u->source->set_state_in_main_thread = source_set_state_in_main_thread_cb;
2324 u->source->set_state_in_io_thread = source_set_state_in_io_thread_cb;
2326 u->source->set_port = source_set_port_ucm_cb;
2328 u->source->set_port = source_set_port_cb;
2329 u->source->reconfigure = source_reconfigure_cb;
2330 u->source->userdata = u;
2332 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
2333 pa_source_set_rtpoll(u->source, u->rtpoll);
     /* Final buffer geometry, now that ALSA has settled the configuration. */
2335 u->frame_size = frame_size;
2336 u->frames_per_block = pa_mempool_block_size_max(m->core->mempool) / frame_size;
2337 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2338 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2339 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
2341 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2342 (double) u->hwbuf_size / (double) u->fragment_size,
2343 (long unsigned) u->fragment_size,
2344 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2345 (long unsigned) u->hwbuf_size,
2346 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2348 if (u->use_tsched) {
2349 u->tsched_watermark_ref = tsched_watermark;
2350 reset_watermark(u, u->tsched_watermark_ref, &ss, false);
2353 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
2357 if (update_sw_params(u) < 0)
2360 if (setup_mixer(u, ignore_dB) < 0)
2363 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2365 thread_name = pa_sprintf_malloc("alsa-source-%s", pa_strnull(pa_proplist_gets(u->source->proplist, "alsa.id")));
2366 if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
2367 pa_log("Failed to create thread.");
2370 pa_xfree(thread_name);
2373 /* Get initial mixer settings */
2374 if (volume_is_set) {
2375 if (u->source->set_volume)
2376 u->source->set_volume(u->source);
2378 if (u->source->get_volume)
2379 u->source->get_volume(u->source);
2383 if (u->source->set_mute)
2384 u->source->set_mute(u->source);
2386 if (u->source->get_mute) {
2389 if (u->source->get_mute(u->source, &mute) >= 0)
2390 pa_source_set_mute(u->source, mute, false);
2394 if ((volume_is_set || mute_is_set) && u->source->write_volume)
2395 u->source->write_volume(u->source);
2397 pa_source_put(u->source);
2400 pa_alsa_profile_set_free(profile_set);
     /* Failure path: release the thread name and profile set. */
2405 pa_xfree(thread_name);
2411 pa_alsa_profile_set_free(profile_set);
/* Tear down everything pa_alsa_source_new() built, in reverse order:
 * unlink the source, shut down and join the IO thread, free queues, rtpoll
 * machinery, PCM handle, mixer state, smoother, probed format/rate lists
 * and owned strings. Safe on partially-initialized userdata — every step
 * is guarded by a NULL/handle check (most guards elided in this view). */
2416 static void userdata_free(struct userdata *u) {
2420 pa_source_unlink(u->source);
     /* Ask the IO thread to exit, then join it. */
2423 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2424 pa_thread_free(u->thread);
2427 pa_thread_mq_done(&u->thread_mq);
2430 pa_source_unref(u->source);
2433 pa_alsa_mixer_pdata_free(u->mixer_pd);
2435 if (u->alsa_rtpoll_item)
2436 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2439 pa_rtpoll_free(u->rtpoll);
2441 if (u->pcm_handle) {
2442 snd_pcm_drop(u->pcm_handle);
2443 snd_pcm_close(u->pcm_handle);
2447 pa_alsa_fdlist_free(u->mixer_fdl);
2449 /* Only free the mixer_path if the source owns it (synthesized path;
     * path-set and UCM paths are owned elsewhere) */
2450 if (u->mixer_path && !u->mixer_path_set && !u->ucm_context)
2451 pa_alsa_path_free(u->mixer_path);
2454 pa_hashmap_free(u->mixers);
2457 pa_smoother_free(u->smoother);
2459 if (u->supported_formats)
2460 pa_xfree(u->supported_formats);
2462 if (u->supported_rates)
2463 pa_xfree(u->supported_rates);
2468 pa_xfree(u->device_name);
2469 pa_xfree(u->control_device);
2470 pa_xfree(u->paths_dir);
2474 void pa_alsa_source_free(pa_source *s) {
2477 pa_source_assert_ref(s);
2478 pa_assert_se(u = s->userdata);