2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
30 #include <asoundlib.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/volume.h>
35 #include <pulse/xmalloc.h>
37 #include <pulsecore/core.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/thread-mq.h>
50 #include <pulsecore/rtpoll.h>
51 #include <pulsecore/time-smoother.h>
53 #include <modules/reserve-wrap.h>
55 #include "alsa-util.h"
56 #include "alsa-source.h"
58 /* #define DEBUG_TIMING */
60 #define DEFAULT_DEVICE "default"
62 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
63 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
65 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
66 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
67 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
68 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
69 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
70 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
72 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
73 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
75 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
76 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
78 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
79 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
81 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
89 pa_thread_mq thread_mq;
92 snd_pcm_t *pcm_handle;
95 pa_alsa_fdlist *mixer_fdl;
96 pa_alsa_mixer_pdata *mixer_pd;
97 snd_mixer_t *mixer_handle;
98 pa_alsa_path_set *mixer_path_set;
99 pa_alsa_path *mixer_path;
101 pa_cvolume hardware_volume;
110 tsched_watermark_ref,
116 watermark_inc_threshold,
117 watermark_dec_threshold;
119 pa_usec_t watermark_dec_not_before;
120 pa_usec_t min_latency_ref;
122 char *device_name; /* name of the PCM device */
123 char *control_device; /* name of the control device */
125 pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
129 pa_rtpoll_item *alsa_rtpoll_item;
131 pa_smoother *smoother;
133 pa_usec_t smoother_interval;
134 pa_usec_t last_smoother_update;
136 pa_reserve_wrapper *reserve;
137 pa_hook_slot *reserve_slot;
138 pa_reserve_monitor_wrapper *monitor;
139 pa_hook_slot *monitor_slot;
142 pa_alsa_ucm_mapping_context *ucm_context;
145 static void userdata_free(struct userdata *u);
/* Called from main context.
 * Device-reservation hook: another application has requested ownership of
 * the audio device, so we suspend the source to release it. */
static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {

    /* If we cannot suspend, refuse to hand over the device. */
    if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;
/* Tear down the device-reservation state: disconnect our hook slot and
 * drop the reference on the reserve wrapper. */
static void reserve_done(struct userdata *u) {

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;

    pa_reserve_wrapper_unref(u->reserve);
/* Push the source's human-readable description to the reservation service
 * so other applications can see who currently holds the device. */
static void reserve_update(struct userdata *u) {
    const char *description;

    if (!u->source || !u->reserve)

    if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
/* Acquire the device-reservation lock for ALSA device 'dname' and connect
 * our takeover hook.  Skipped entirely in system mode.
 * Returns 0 on success, negative on failure. */
static int reserve_init(struct userdata *u, const char *dname) {

    if (pa_in_system_mode())

    /* Map the ALSA device name to its reservation service name. */
    if (!(rname = pa_alsa_get_reserve_name(dname)))

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
/* Called from main context.
 * Reservation-monitor hook: suspend the source while some other
 * application holds the device busy, resume when it becomes free again —
 * unless we hold the reservation ourselves. */
static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {

    b = PA_PTR_TO_UINT(busy) && !u->reserve;

    pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
/* Tear down the reservation monitor: disconnect the hook slot and drop
 * the reference on the monitor wrapper. */
static void monitor_done(struct userdata *u) {

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;

    pa_reserve_monitor_wrapper_unref(u->monitor);
/* Set up a monitor on the reservation service for device 'dname' so we can
 * auto-suspend/resume as other applications grab or release the device.
 * Skipped in system mode.  Returns 0 on success, negative on failure. */
static int reserve_monitor_init(struct userdata *u, const char *dname) {

    if (pa_in_system_mode())

    /* Map the ALSA device name to its reservation service name. */
    if (!(rname = pa_alsa_get_reserve_name(dname)))

    /* We are resuming, try to lock the device */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
/* Clamp the timer-scheduling minimum sleep and wakeup byte counts so that
 * each is at least one frame and at most half of the usable hardware
 * buffer (frame-aligned).  Only meaningful in tsched mode. */
static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u->use_tsched);

    /* Usable buffer = full hw buffer minus the part we deliberately keep unused. */
    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
/* Clamp the tsched wakeup watermark into its valid range:
 * at least min_wakeup, at most the usable buffer minus min_sleep. */
static void fix_tsched_watermark(struct userdata *u) {

    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;
/* React to an overrun (or near-overrun) by waking up earlier: first try to
 * raise the wakeup watermark; if that is already maxed out, raise the
 * source's minimal latency instead (unless the latency range is fixed). */
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    /* Grow geometrically but never by more than the configured step. */
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency unless doing so was disabled in

    if (u->fixed_latency_range)

    old_min_latency = u->source->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    /* Never exceed the configured maximum latency. */
    new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);

    /* When we reach this point neither the watermark nor the latency can
     * be raised any further — nothing more we can do about overruns. */
/* Opportunistically lower the wakeup watermark when we have consistently
 * had plenty of headroom, but only after a verification period
 * (watermark_dec_not_before) has elapsed. */
static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;

    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    /* First call: just arm the hold-off timer, don't decrease yet. */
    if (u->watermark_dec_not_before <= 0)

    /* Hold-off period not over yet. */
    if (u->watermark_dec_not_before > now)

    old_watermark = u->tsched_watermark;

    /* Shrink by half, but never by more than the configured step. */
    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

    /* Re-arm the hold-off timer for the next decrease. */
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
/* Compute how long we may sleep before the next wakeup (*sleep_usec) and
 * how much time we should reserve for processing (*process_usec), based on
 * the requested latency and the current wakeup watermark. */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u->use_tsched);

    usec = pa_source_get_requested_latency_within_thread(u->source);

    /* No specific latency requested: use the whole hardware buffer. */
    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

    /* Sleep until the watermark before the deadline. */
    *sleep_usec = usec - wm;

    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
/* Attempt to recover the PCM from an error returned by ALSA call 'call'
 * (typically -EPIPE overrun or -ESTRPIPE after system suspend) using
 * snd_pcm_recover().  Returns 0 on success, negative if unrecoverable. */
static int try_recover(struct userdata *u, const char *call, int err) {

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    /* -EAGAIN is not an error condition and must be handled by the caller. */
    pa_assert(err != -EAGAIN);

    pa_log_debug("%s: Buffer overrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
/* Given the number of bytes currently available from the device, compute
 * how much room remains in the hardware buffer before an overrun, detect
 * overruns, and adapt the wakeup watermark up or down accordingly.
 * Returns the number of bytes left to record before overrun. */
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    pa_bool_t overrun = FALSE;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;

        /* We got a dropout. What a mess! */

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");

    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);

        pa_bool_t reset_not_before = TRUE;

        /* Too close to an overrun (or already overran): wake up earlier. */
        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = FALSE;

            /* We decrease the watermark only if have actually
             * been woken up by a timeout. If something else woke
             * us up it's too easy to fulfill the deadlines... */

                decrease_watermark(u);

        if (reset_not_before)
            u->watermark_dec_not_before = 0;

    return left_to_record;
/* Called from IO context.
 * Read all currently available audio from the device using the ALSA mmap
 * transfer interface, post it to the source, and compute how long we may
 * sleep until the next wakeup (*sleep_usec).
 * Returns 1 if any data was read, 0 if not, negative on fatal error. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;

    pa_source_assert_ref(u->source);

    hw_sleep_time(u, &max_sleep_usec, &process_usec);

        pa_bool_t after_avail = TRUE;

        /* Ask ALSA how many frames are ready for reading. */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)

        n_bytes = (size_t) n * u->frame_size;

        pa_log_debug("avail: %lu", (unsigned long) n_bytes);

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);

            /* Still plenty of headroom: postpone reading. */
            pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {

            pa_log_debug("Not reading, because too early.");

        if (PA_UNLIKELY(n_bytes <= 0)) {

                /* Poll woke us but there is nothing to read: most likely a
                 * driver bug — warn loudly with the driver name. */
                char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                         "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                         "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),

                pa_log_debug("Not reading, because not necessary.");

            /* Bound the number of loop iterations per wakeup. */
            pa_log_debug("Not filling up, because already too many iterations.");

        pa_log_debug("Reading");

            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                /* -EAGAIN after we already read something just means "done". */
                if (!after_avail && err == -EAGAIN)

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)

            pa_assert(frames > 0);

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'ed device memory in a fixed (read-only) memblock
             * and post it downstream without copying. */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)

            u->read_count += frames * u->frame_size;

            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);

            if ((size_t) frames * u->frame_size >= n_bytes)

            n_bytes -= (size_t) frames * u->frame_size;

    /* Sleep until we would hit the watermark again. */
    *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
    process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;

    return work_done ? 1 : 0;
/* Called from IO context.
 * Read all currently available audio from the device using snd_pcm_readi()
 * (classic read interface, used when mmap is unavailable), post it to the
 * source, and compute how long we may sleep (*sleep_usec).
 * Returns 1 if any data was read, 0 if not, negative on fatal error. */
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    int work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;

    pa_source_assert_ref(u->source);

    hw_sleep_time(u, &max_sleep_usec, &process_usec);

        pa_bool_t after_avail = TRUE;

        /* Ask ALSA how many frames are ready for reading. */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);

            /* Still plenty of headroom: postpone reading. */
            pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)

        if (PA_UNLIKELY(n_bytes <= 0)) {

                /* Poll woke us but there is nothing to read: most likely a
                 * driver bug — warn loudly with the driver name. */
                char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                         "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                         "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),

            /* Bound the number of loop iterations per wakeup. */
            pa_log_debug("Not filling up, because already too many iterations.");

            snd_pcm_sframes_t frames;

            /* Allocate a fresh memblock and copy the device data into it. */
            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            /* pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                /* -EAGAIN after we already read something just means "done". */
                if (!after_avail && (int) frames == -EAGAIN)

                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);

            pa_assert(frames > 0);

            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            u->read_count += frames * u->frame_size;

            /* pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)

            n_bytes -= (size_t) frames * u->frame_size;

    /* Sleep until we would hit the watermark again. */
    *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
    process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;

    return work_done ? 1 : 0;
/* Called from IO context.
 * Feed a new (system time, stream position) sample into the time smoother,
 * using the driver-provided delay and hardware timestamp when available.
 * Updates are rate-limited by the exponentially growing smoother_interval. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
        /* Prefer the hardware timestamp from the PCM status. */
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)

    /* Stream position in bytes = bytes read so far plus what is queued in hw. */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
/* Estimate the current capture latency: the difference between the
 * smoothed device position and the amount of data already delivered.
 * Never returns a negative value. */
static pa_usec_t source_get_latency(struct userdata *u) {
    pa_usec_t now1, now2;

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);

    return delay >= 0 ? (pa_usec_t) delay : 0;
/* (Re)build the rtpoll item wrapping the PCM's poll descriptors, replacing
 * any previous one.  Returns 0 on success, negative on failure. */
static int build_pollfd(struct userdata *u) {
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
/* Called from IO context */
/* Suspend capture: pause the smoother, close the PCM handle and remove its
 * poll item.  Returns 0 on success. */
static int suspend(struct userdata *u) {
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;

    pa_log_info("Device suspended...");
/* Called from IO context */
/* Recompute hwbuf_unused and avail_min from the currently requested
 * latency and push the resulting software parameters to ALSA.
 * Returns 0 on success, negative on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;

    /* Use the full buffer if no one asked us for anything specific */

    if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {

        pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

        b = pa_usec_to_bytes(latency, &u->source->sample_spec);

        /* We need at least one sample in our buffer */

        if (PA_UNLIKELY(b < u->frame_size))

        /* Leave the rest of the hw buffer deliberately unused. */
        u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

        pa_usec_t sleep_usec, process_usec;

        /* In tsched mode, raise avail_min so ALSA wakes us only near the deadline. */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
/* Called from IO Context on unsuspend or from main thread when creating source */
/* Reset the tsched watermark and its inc/dec steps/thresholds to their
 * configured values (converted to bytes of the current sample spec), then
 * re-publish the source's latency range. */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,

    /* Convert the watermark from 'ss' to the source's own sample spec. */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->source->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    /* Use the thread-local variant when called from the IO context. */
    pa_source_set_latency_range_within_thread(u->source,
                                              pa_bytes_to_usec(u->hwbuf_size, ss));

    pa_source_set_latency_range(u->source,
                                pa_bytes_to_usec(u->hwbuf_size, ss));

    /* work-around assert in pa_source_set_latency_within_thead,
       keep track of min_latency and reuse it when
       this routine is called from IO context */
    u->min_latency_ref = u->source->thread_info.min_latency;

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
/* Called from IO context */
/* Resume capture after a suspend: reopen the PCM device, restore the exact
 * hardware/software parameters we had before, rebuild the poll item and
 * reset the smoother and watermark.  If the original settings cannot be
 * restored, the resume fails and the PCM is closed again.
 * Returns 0 on success, negative on failure. */
static int unsuspend(struct userdata *u) {
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));

    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));

    /* The device must come back with the same access mode... */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");

    /* ...the same sample spec... */
    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");

    /* ...and the same fragment/buffer geometry. */
    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));

    if (update_sw_params(u) < 0)

    if (build_pollfd(u) < 0)

    /* FIXME: We need to reload the volume somehow */

    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    /* reset the watermark to the value defined when source was created */
    reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    /* Error path: close the half-opened PCM again. */
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
/* Called from IO context */
/* Message handler for the source object: answers latency queries and
 * performs suspend/resume state transitions inside the IO thread.
 * Falls through to the generic pa_source handler for everything else. */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

        case PA_SOURCE_MESSAGE_GET_LATENCY: {

            r = source_get_latency(u);

            *((pa_usec_t*) data) = r;

        case PA_SOURCE_MESSAGE_SET_STATE:

            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SOURCE_SUSPENDED: {

                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if ((r = suspend(u)) < 0)

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING: {

                    /* First activation: set up the poll descriptors. */
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        if (build_pollfd(u) < 0)

                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:

    return pa_source_process_msg(o, code, data, offset, chunk);
/* Called from main context */
/* Main-thread state-change hook: release the device reservation when
 * suspending and re-acquire it when resuming.  Returns 0 on success,
 * -PA_ERR_BUSY if the device could not be re-reserved. */
static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
    pa_source_state_t old_state;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);

    old_state = pa_source_get_state(u->source);

    if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
    else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;
/* Mixer element callback used in the main thread (non-deferred volume):
 * refresh our volume/mute state when the hardware mixer changes. */
static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)

    if (!PA_SOURCE_IS_LINKED(u->source->state))

    /* While the session is suspended just remember that the mixer changed. */
    if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
        pa_source_set_mixer_dirty(u->source, TRUE);

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_source_get_volume(u->source, TRUE);
        pa_source_get_mute(u->source, TRUE);
/* Mixer element callback used in the IO thread (deferred volume):
 * refresh our volume/mute state when the hardware mixer changes. */
static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)

    /* While the session is suspended just remember that the mixer changed. */
    if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
        pa_source_set_mixer_dirty(u->source, TRUE);

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_source_update_volume_and_mute(u->source);
/* Read the current hardware volume from the mixer path, scale it by the
 * base volume and publish it as the source's real volume.  If the hardware
 * volume changed behind our back, drop any stale software volume. */
static void source_get_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug("  in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));

    /* Nothing changed: keep current state. */
    if (pa_cvolume_equal(&u->hardware_volume, &r))

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_source_set_soft_volume(s, NULL);
/* Write the requested volume to the hardware mixer, then — when the mixer
 * has a dB scale — compute the residual software volume needed so the
 * combined hw+sw volume matches the user's request exactly (skipping the
 * software step when the error is within VOLUME_ACCURACY). */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug("   in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug("      in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug("     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

        /* No dB scale: report what the hardware actually applied. */
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */
/* Called from IO context (deferred-volume mode only).
 * Write the pending hardware volume (thread_info.current_hw_volume) to the
 * mixer and log a debug warning if the value the hardware accepted differs
 * from the request by more than VOLUME_ACCURACY. */
static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");

        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Compare what was written with what was requested. */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
            char pcnt[2][PA_CVOLUME_SNPRINT_MAX];

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug("  in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
/* Read the hardware mute state from the mixer path into the source. */
static void source_get_mute_cb(pa_source *s) {
    struct userdata *u = s->userdata;

    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
/* Propagate the source's mute state to the hardware mixer. */
static void source_set_mute_cb(pa_source *s) {
    struct userdata *u = s->userdata;

    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
/* Wire up the source's volume/mute callbacks according to what the mixer
 * path supports: hardware volume (with or without dB scale, optionally
 * deferred) or software fallback, and hardware vs. software mute. */
static void mixer_volume_init(struct userdata *u) {

    if (!u->mixer_path->has_volume) {
        /* No hw volume control at all: unhook every volume callback. */
        pa_source_set_write_volume_callback(u->source, NULL);
        pa_source_set_get_volume_callback(u->source, NULL);
        pa_source_set_set_volume_callback(u->source, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");

        pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
        pa_source_set_set_volume_callback(u->source, source_set_volume_cb);

        /* Deferred (IO-thread) volume writes need a dB scale. */
        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");

            pa_source_set_write_volume_callback(u->source, NULL);

        if (u->mixer_path->has_dB) {
            pa_source_enable_decibel_volume(u->source, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Base volume = the attenuation that maps 0 dB to PA_VOLUME_NORM. */
            u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->source->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));

            pa_source_enable_decibel_volume(u->source, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->source->base_volume = PA_VOLUME_NORM;
            u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");

    if (!u->mixer_path->has_mute) {
        pa_source_set_get_mute_callback(u->source, NULL);
        pa_source_set_set_mute_callback(u->source, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");

        pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
        pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
        pa_log_info("Using hardware mute control.");
/* pa_source "set port" callback used when the device is managed by an ALSA
 * UCM (Use Case Manager) context: delegates port switching to the UCM layer.
 * Returns whatever pa_alsa_ucm_set_port() returns (0 on success, negative on
 * failure, per the usual convention — confirm in alsa-ucm.h). */
1359 static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
1360 struct userdata *u = s->userdata;
/* This callback is only installed when a UCM context exists (see
 * pa_alsa_source_new()). FALSE selects the capture direction here. */
1364 pa_assert(u->ucm_context);
1366 return pa_alsa_ucm_set_port(u->ucm_context, p, FALSE);
/* pa_source "set port" callback for the mixer-path case: activates the ALSA
 * mixer path attached to the chosen device port, re-applies the current mute
 * state, and re-initializes the volume/mute callbacks for the new path.
 * NOTE(review): the function tail (deferred-volume sync and return) is only
 * partially visible in this extract. */
1369 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1370 struct userdata *u = s->userdata;
1371 pa_alsa_port_data *data;
1375 pa_assert(u->mixer_handle);
/* The per-port private data carries the mixer path for this port. */
1377 data = PA_DEVICE_PORT_DATA(p);
/* pa_assert_se: the assignment must happen even in NDEBUG builds. */
1379 pa_assert_se(u->mixer_path = data->path);
1380 pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
/* The new path may have different volume/mute capabilities. */
1382 mixer_volume_init(u);
/* With deferred volume, the pending HW volume write must be reconciled
 * against the newly selected path. */
1386 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1387 if (s->write_volume)
/* pa_source callback invoked when a stream changes its requested latency:
 * recomputes the software parameters (watermarks etc.) accordingly. Only
 * meaningful under timer-based scheduling, hence the assert. */
1397 static void source_update_requested_latency_cb(pa_source *s) {
1398 struct userdata *u = s->userdata;
1400 pa_assert(u->use_tsched); /* only when timer scheduling is used
1401 * we can dynamically adjust the
1407 update_sw_params(u);
/* pa_source "update rate" callback: switches the device to an alternate
 * sample rate, but only if the hardware advertises support for it
 * (u->rates, gathered at init) and the source is not currently opened.
 * Returns TRUE on success, FALSE otherwise (tail not fully visible).
 * NOTE(review): the log text below says "Sink" although this is the source
 * module — likely a copy/paste slip worth fixing in a code change (a
 * doc-only edit must not touch runtime strings). */
1410 static pa_bool_t source_update_rate_cb(pa_source *s, uint32_t rate)
1412 struct userdata *u = s->userdata;
1414 pa_bool_t supported = FALSE;
/* u->rates is a zero-terminated array of supported rates. */
1418 for (i = 0; u->rates[i]; i++) {
1419 if (u->rates[i] == rate) {
1426 pa_log_info("Sink does not support sample rate of %d Hz", rate);
/* Only change the rate while nothing is capturing from the device. */
1430 if (!PA_SOURCE_IS_OPENED(s->state)) {
1431 pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1432 u->source->sample_spec.rate = rate;
/* IO thread main loop: reads audio data from the ALSA PCM device (mmap or
 * plain read), computes the next wakeup time under timer-based scheduling,
 * applies deferred volume changes, and dispatches ALSA poll events. Runs
 * until told to shut down via the thread message queue.
 * NOTE(review): many interior lines (loop braces, 'finish:' label, error
 * paths) are missing from this extract; the flow described in comments is
 * inferred from the embedded line numbers — verify against the full file. */
1439 static void thread_func(void *userdata) {
1440 struct userdata *u = userdata;
1441 unsigned short revents = 0;
1445 pa_log_debug("Thread starting up");
/* Elevate to realtime priority if the daemon is configured for it. */
1447 if (u->core->realtime_scheduling)
1448 pa_make_realtime(u->core->realtime_priority);
1450 pa_thread_mq_install(&u->thread_mq);
1454 pa_usec_t rtpoll_sleep = 0;
1457 pa_log_debug("Loop");
1460 /* Read some data and pass it to the sources */
1461 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1463 pa_usec_t sleep_usec = 0;
1464 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
/* First pass with the device not yet running: kick off capture and
 * resume the clock smoother. */
1467 pa_log_info("Starting capture.");
1468 snd_pcm_start(u->pcm_handle);
1470 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
/* mmap vs. plain read path, chosen at init time. */
1476 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1478 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1483 /* pa_log_debug("work_done = %i", work_done); */
1488 if (u->use_tsched) {
1491 /* OK, the capture buffer is now empty, let's
1492 * calculate when to wake up next */
1494 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1496 /* Convert from the sound card time domain to the
1497 * system time domain */
1498 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1500 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1502 /* We don't trust the conversion, so we wake up whatever comes first */
1503 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
/* Deferred volume: the next pending HW volume write may need a wakeup
 * earlier than the capture timer. */
1507 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1508 pa_usec_t volume_sleep;
1509 pa_source_volume_change_apply(u->source, &volume_sleep);
1510 if (volume_sleep > 0) {
1511 if (rtpoll_sleep > 0)
1512 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1514 rtpoll_sleep = volume_sleep;
1518 if (rtpoll_sleep > 0)
1519 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1521 pa_rtpoll_set_timer_disabled(u->rtpoll);
1523 /* Hmm, nothing to do. Let's sleep */
1524 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
/* Apply any volume change that became due while we slept. */
1527 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
1528 pa_source_volume_change_apply(u->source, NULL);
1533 /* Tell ALSA about this and process its response */
1534 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1535 struct pollfd *pollfd;
1539 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
/* Translate raw poll revents into PCM-level events. */
1541 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1542 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
/* Anything besides POLLIN (POLLERR etc.) means the device needs
 * recovery (e.g. after an overrun/suspend). */
1546 if (revents & ~POLLIN) {
1547 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1552 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1553 pa_log_debug("Wakeup from ALSA!");
1560 /* If this was no regular exit from the loop we have to continue
1561 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1562 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1563 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1566 pa_log_debug("Thread shutting down");
/* Chooses the new source's name: an explicit "source_name"/"name" module
 * argument wins (and makes name registration failure fatal); otherwise a
 * name is synthesized as "alsa_input.<device>[.<mapping>]" (non-fatal on
 * registration collision, so a suffix can be appended by the core).
 * NOTE(review): interior lines (returns, pa_xfree of 't') are missing from
 * this extract. */
1569 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1575 pa_assert(device_name);
1577 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1578 pa_source_new_data_set_name(data, n);
/* User-specified name: do not silently rename on collision. */
1579 data->namereg_fail = TRUE;
1583 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1584 data->namereg_fail = TRUE;
/* Fall back to a generated name based on device id/name and mapping. */
1586 n = device_id ? device_id : device_name;
1587 data->namereg_fail = FALSE;
1591 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1593 t = pa_sprintf_malloc("alsa_input.%s", n);
1595 pa_source_new_data_set_name(data, t);
/* Locates a usable ALSA mixer for the PCM device and probes a mixer path:
 * either a single synthesized path for an explicit "control" element, or the
 * mapping's input path set. On any probe failure the partially-initialized
 * mixer state is torn down again (visible at the bottom — presumably behind
 * a 'fail:' label that is missing from this extract). */
1599 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
/* Nothing to do without either a mapping or an explicit element. */
1602 if (!mapping && !element)
1605 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1606 pa_log_info("Failed to find a working mixer device.");
/* Explicit element given: build a one-element path for capture. */
1612 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1615 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1618 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1619 pa_alsa_path_dump(u->mixer_path);
1620 } else if (!(u->mixer_path_set = mapping->input_path_set))
/* Error cleanup: release whatever was set up before the failure. */
1627 if (u->mixer_path) {
1628 pa_alsa_path_free(u->mixer_path);
1629 u->mixer_path = NULL;
1632 if (u->mixer_handle) {
1633 snd_mixer_close(u->mixer_handle);
1634 u->mixer_handle = NULL;
/* Activates the appropriate mixer path for the source (the active port's
 * path, or the single/first available path), initializes volume/mute
 * handling, and — when any path offers volume or mute — registers mixer
 * event callbacks (rtpoll-based for deferred volume, fdlist/mainloop-based
 * otherwise). Returns 0 on success, negative on failure (error paths are
 * only partially visible in this extract). */
1638 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1639 pa_bool_t need_mixer_callback = FALSE;
/* No mixer found earlier — nothing to set up. */
1643 if (!u->mixer_handle)
1646 if (u->source->active_port) {
1647 pa_alsa_port_data *data;
1649 /* We have a list of supported paths, so let's activate the
1650 * one that has been chosen as active */
1652 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1653 u->mixer_path = data->path;
1655 pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);
/* No port chosen yet: fall back to the first path in the set. */
1659 if (!u->mixer_path && u->mixer_path_set)
1660 u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1662 if (u->mixer_path) {
1663 /* Hmm, we have only a single path, then let's activate it */
1665 pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
1670 mixer_volume_init(u);
1672 /* Will we need to register callbacks? */
1673 if (u->mixer_path_set && u->mixer_path_set->paths) {
1677 PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1678 if (p->has_volume || p->has_mute)
1679 need_mixer_callback = TRUE;
1682 else if (u->mixer_path)
1683 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1685 if (need_mixer_callback) {
1686 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
/* Deferred volume: mixer events are handled inside the IO thread via
 * the rtpoll; otherwise they go through the main loop's fd list. */
1687 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1688 u->mixer_pd = pa_alsa_mixer_pdata_new();
1689 mixer_callback = io_mixer_callback;
1691 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1692 pa_log("Failed to initialize file descriptor monitoring");
1696 u->mixer_fdl = pa_alsa_fdlist_new();
1697 mixer_callback = ctl_mixer_callback;
1699 if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1700 pa_log("Failed to initialize file descriptor monitoring");
1705 if (u->mixer_path_set)
1706 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1708 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
/* Creates and fully initializes an ALSA capture source from module
 * arguments: parses sample spec / buffer metrics / feature flags, opens the
 * PCM device (by mapping, by auto-probed device id, or by device string),
 * sets up mixer/UCM control, creates the pa_source object, starts the IO
 * thread and applies initial volume/mute. Returns the new source, or NULL
 * on failure (the 'fail:'/'goto fail' error paths are largely missing from
 * this extract — only the profile_set cleanup lines are visible).
 * NOTE(review): many interior lines are missing throughout; comments mark
 * the visible logical sections only. */
1714 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1716 struct userdata *u = NULL;
1717 const char *dev_id = NULL;
1719 uint32_t alternate_sample_rate;
1721 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1722 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1724 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, fixed_latency_range = FALSE;
1725 pa_source_new_data data;
1726 pa_alsa_profile_set *profile_set = NULL;
/* --- Parse module arguments, starting from the daemon defaults. --- */
1731 ss = m->core->default_sample_spec;
1732 map = m->core->default_channel_map;
1733 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1734 pa_log("Failed to parse sample specification and channel map");
1738 alternate_sample_rate = m->core->alternate_sample_rate;
1739 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1740 pa_log("Failed to parse alternate sample rate");
1744 frame_size = pa_frame_size(&ss);
/* Buffer geometry defaults, overridable by module arguments below. */
1746 nfrags = m->core->default_n_fragments;
1747 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1749 frag_size = (uint32_t) frame_size;
1750 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1751 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1753 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1754 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1755 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1756 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1757 pa_log("Failed to parse buffer metrics");
1761 buffer_size = nfrags * frag_size;
1763 period_frames = frag_size/frame_size;
1764 buffer_frames = buffer_size/frame_size;
1765 tsched_frames = tsched_size/frame_size;
1767 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1768 pa_log("Failed to parse mmap argument.");
1772 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1773 pa_log("Failed to parse tsched argument.");
1777 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1778 pa_log("Failed to parse ignore_dB argument.");
1782 deferred_volume = m->core->deferred_volume;
1783 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1784 pa_log("Failed to parse deferred_volume argument.");
1788 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
1789 pa_log("Failed to parse fixed_latency_range argument.");
/* Timer scheduling may be globally disallowed (e.g. no high-res timers). */
1793 use_tsched = pa_alsa_may_tsched(use_tsched);
/* --- Allocate and pre-populate the userdata. --- */
1795 u = pa_xnew0(struct userdata, 1);
1798 u->use_mmap = use_mmap;
1799 u->use_tsched = use_tsched;
1800 u->deferred_volume = deferred_volume;
1801 u->fixed_latency_range = fixed_latency_range;
1803 u->rtpoll = pa_rtpoll_new();
1804 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
/* Clock smoother: maps sound-card time to system time for latency calc. */
1806 u->smoother = pa_smoother_new(
1807 SMOOTHER_ADJUST_USEC,
1808 SMOOTHER_WINDOW_USEC,
1814 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1817 if (mapping && mapping->ucm_context.ucm)
1818 u->ucm_context = &mapping->ucm_context;
1820 dev_id = pa_modargs_get_value(
1822 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1824 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
/* Device reservation (so other apps/daemons don't grab the device). */
1826 if (reserve_init(u, dev_id) < 0)
1829 if (reserve_monitor_init(u, dev_id) < 0)
/* --- Open the PCM device: by explicit mapping, by probed device_id, or
 * by raw device string. 'b' and 'd' report whether mmap and tsched could
 * actually be enabled. --- */
1837 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1838 pa_log("device_id= not set");
1842 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1846 SND_PCM_STREAM_CAPTURE,
1847 &period_frames, &buffer_frames, tsched_frames,
1851 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1853 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1856 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1860 SND_PCM_STREAM_CAPTURE,
1861 &period_frames, &buffer_frames, tsched_frames,
1862 &b, &d, profile_set, &mapping)))
1867 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1868 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1871 SND_PCM_STREAM_CAPTURE,
1872 &period_frames, &buffer_frames, tsched_frames,
1877 pa_assert(u->device_name);
1878 pa_log_info("Successfully opened device %s.", u->device_name);
/* Modems expose PCM devices but are not usable as audio sources. */
1880 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1881 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1886 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
/* Downgrade mmap/tsched if the opened device could not support them. */
1888 if (use_mmap && !b) {
1889 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1890 u->use_mmap = use_mmap = FALSE;
1893 if (use_tsched && (!b || !d)) {
1894 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1895 u->use_tsched = use_tsched = FALSE;
1899 pa_log_info("Successfully enabled mmap() mode.");
1901 if (u->use_tsched) {
1902 pa_log_info("Successfully enabled timer-based scheduling mode.");
1903 if (u->fixed_latency_range)
1904 pa_log_info("Disabling latency range changes on overrun");
1907 u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
1909 pa_log_error("Failed to find any supported sample rates.");
1913 /* ALSA might tweak the sample spec, so recalculate the frame size */
1914 frame_size = pa_frame_size(&ss);
/* Mixer discovery only applies when no UCM context manages the device. */
1916 if (!u->ucm_context)
1917 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
/* --- Build the pa_source_new_data descriptor and create the source. --- */
1919 pa_source_new_data_init(&data);
1920 data.driver = driver;
1923 set_source_name(&data, ma, dev_id, u->device_name, mapping);
1925 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1926 * variable instead of using &data.namereg_fail directly, because
1927 * data.namereg_fail is a bitfield and taking the address of a bitfield
1928 * variable is impossible. */
1929 namereg_fail = data.namereg_fail;
1930 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1931 pa_log("Failed to parse namereg_fail argument.");
1932 pa_source_new_data_done(&data);
1935 data.namereg_fail = namereg_fail;
1937 pa_source_new_data_set_sample_spec(&data, &ss);
1938 pa_source_new_data_set_channel_map(&data, &map);
1939 pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
/* Property list: device identity, buffering geometry and access mode. */
1941 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1942 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1943 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1944 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1945 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1948 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1949 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1952 pa_alsa_init_description(data.proplist);
1954 if (u->control_device)
1955 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1957 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1958 pa_log("Invalid properties");
1959 pa_source_new_data_done(&data);
/* Ports come either from UCM or from the probed mixer path set. */
1964 pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, FALSE, card);
1965 else if (u->mixer_path_set)
1966 pa_alsa_add_ports(&data, u->mixer_path_set, card);
1968 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1969 pa_source_new_data_done(&data);
1972 pa_log("Failed to create source object");
1976 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
1977 &u->source->thread_info.volume_change_safety_margin) < 0) {
1978 pa_log("Failed to parse deferred_volume_safety_margin parameter");
1982 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
1983 &u->source->thread_info.volume_change_extra_delay) < 0) {
1984 pa_log("Failed to parse deferred_volume_extra_delay parameter");
/* --- Install callbacks and finish wiring the source to this module. --- */
1988 u->source->parent.process_msg = source_process_msg;
1990 u->source->update_requested_latency = source_update_requested_latency_cb;
1991 u->source->set_state = source_set_state_cb;
1993 u->source->set_port = source_set_port_ucm_cb;
1995 u->source->set_port = source_set_port_cb;
1996 if (u->source->alternate_sample_rate)
1997 u->source->update_rate = source_update_rate_cb;
1998 u->source->userdata = u;
2000 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
2001 pa_source_set_rtpoll(u->source, u->rtpoll);
2003 u->frame_size = frame_size;
2004 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2005 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2006 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
2008 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2009 (double) u->hwbuf_size / (double) u->fragment_size,
2010 (long unsigned) u->fragment_size,
2011 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2012 (long unsigned) u->hwbuf_size,
2013 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
/* Latency model: dynamic watermark under tsched, fixed otherwise. */
2015 if (u->use_tsched) {
2016 u->tsched_watermark_ref = tsched_watermark;
2017 reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2020 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
2024 if (update_sw_params(u) < 0)
/* Port/mixer activation: UCM path or classic mixer setup. */
2027 if (u->ucm_context) {
2028 if (u->source->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, FALSE) < 0)
2030 } else if (setup_mixer(u, ignore_dB) < 0)
2033 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2035 if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
2036 pa_log("Failed to create thread.");
2040 /* Get initial mixer settings */
2041 if (data.volume_is_set) {
2042 if (u->source->set_volume)
2043 u->source->set_volume(u->source);
2045 if (u->source->get_volume)
2046 u->source->get_volume(u->source);
2049 if (data.muted_is_set) {
2050 if (u->source->set_mute)
2051 u->source->set_mute(u->source);
2053 if (u->source->get_mute)
2054 u->source->get_mute(u->source);
2057 if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
2058 u->source->write_volume(u->source);
2060 pa_source_put(u->source);
2063 pa_alsa_profile_set_free(profile_set);
/* Failure path cleanup (label not visible in this extract). */
2073 pa_alsa_profile_set_free(profile_set);
/* Tears down everything pa_alsa_source_new() built, in reverse order:
 * unlink the source, stop the IO thread (synchronous SHUTDOWN message),
 * release rtpoll/mixer/PCM resources and free owned strings. Each step is
 * guarded (presumably by 'if (u->...)' checks, several of which are missing
 * from this extract) so the function is safe on partially-initialized
 * userdata from the constructor's failure path. */
2078 static void userdata_free(struct userdata *u) {
2082 pa_source_unlink(u->source);
/* Blocking send: returns only after the IO thread has processed SHUTDOWN. */
2085 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2086 pa_thread_free(u->thread);
2089 pa_thread_mq_done(&u->thread_mq);
2092 pa_source_unref(u->source);
2095 pa_alsa_mixer_pdata_free(u->mixer_pd);
2097 if (u->alsa_rtpoll_item)
2098 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2101 pa_rtpoll_free(u->rtpoll);
/* Stop capture before closing the PCM handle. */
2103 if (u->pcm_handle) {
2104 snd_pcm_drop(u->pcm_handle);
2105 snd_pcm_close(u->pcm_handle);
2109 pa_alsa_fdlist_free(u->mixer_fdl);
/* A path owned by a path set is freed with the set, not here. */
2111 if (u->mixer_path && !u->mixer_path_set)
2112 pa_alsa_path_free(u->mixer_path);
2114 if (u->mixer_handle)
2115 snd_mixer_close(u->mixer_handle);
2118 pa_smoother_free(u->smoother);
2126 pa_xfree(u->device_name);
2127 pa_xfree(u->control_device);
2128 pa_xfree(u->paths_dir);
2132 void pa_alsa_source_free(pa_source *s) {
2135 pa_source_assert_ref(s);
2136 pa_assert_se(u = s->userdata);