2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
30 #include <asoundlib.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/volume.h>
35 #include <pulse/xmalloc.h>
37 #include <pulsecore/core.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/thread-mq.h>
50 #include <pulsecore/rtpoll.h>
51 #include <pulsecore/time-smoother.h>
53 #include <modules/reserve-wrap.h>
55 #include "alsa-util.h"
56 #include "alsa-source.h"
58 /* #define DEBUG_TIMING */
60 #define DEFAULT_DEVICE "default"
62 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
63 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
65 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
66 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
67 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
68 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
69 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
70 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
72 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
73 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
75 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
76 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
78 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
79 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
81 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
89 pa_thread_mq thread_mq;
92 snd_pcm_t *pcm_handle;
95 pa_alsa_fdlist *mixer_fdl;
96 pa_alsa_mixer_pdata *mixer_pd;
97 snd_mixer_t *mixer_handle;
98 pa_alsa_path_set *mixer_path_set;
99 pa_alsa_path *mixer_path;
101 pa_cvolume hardware_volume;
110 tsched_watermark_ref,
116 watermark_inc_threshold,
117 watermark_dec_threshold;
119 pa_usec_t watermark_dec_not_before;
120 pa_usec_t min_latency_ref;
122 char *device_name; /* name of the PCM device */
123 char *control_device; /* name of the control device */
125 pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
129 pa_rtpoll_item *alsa_rtpoll_item;
131 pa_smoother *smoother;
133 pa_usec_t smoother_interval;
134 pa_usec_t last_smoother_update;
136 pa_reserve_wrapper *reserve;
137 pa_hook_slot *reserve_slot;
138 pa_reserve_monitor_wrapper *monitor;
139 pa_hook_slot *monitor_slot;
142 pa_alsa_ucm_mapping_context *ucm_context;
145 static void userdata_free(struct userdata *u);
147 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
151 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
152 return PA_HOOK_CANCEL;
157 static void reserve_done(struct userdata *u) {
160 if (u->reserve_slot) {
161 pa_hook_slot_free(u->reserve_slot);
162 u->reserve_slot = NULL;
166 pa_reserve_wrapper_unref(u->reserve);
171 static void reserve_update(struct userdata *u) {
172 const char *description;
175 if (!u->source || !u->reserve)
178 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
179 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
182 static int reserve_init(struct userdata *u, const char *dname) {
191 if (pa_in_system_mode())
194 if (!(rname = pa_alsa_get_reserve_name(dname)))
197 /* We are resuming, try to lock the device */
198 u->reserve = pa_reserve_wrapper_get(u->core, rname);
206 pa_assert(!u->reserve_slot);
207 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
212 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
218 b = PA_PTR_TO_UINT(busy) && !u->reserve;
220 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
224 static void monitor_done(struct userdata *u) {
227 if (u->monitor_slot) {
228 pa_hook_slot_free(u->monitor_slot);
229 u->monitor_slot = NULL;
233 pa_reserve_monitor_wrapper_unref(u->monitor);
238 static int reserve_monitor_init(struct userdata *u, const char *dname) {
244 if (pa_in_system_mode())
247 if (!(rname = pa_alsa_get_reserve_name(dname)))
250 /* We are resuming, try to lock the device */
251 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
257 pa_assert(!u->monitor_slot);
258 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
263 static void fix_min_sleep_wakeup(struct userdata *u) {
264 size_t max_use, max_use_2;
267 pa_assert(u->use_tsched);
269 max_use = u->hwbuf_size - u->hwbuf_unused;
270 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
272 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
273 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
275 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
276 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
279 static void fix_tsched_watermark(struct userdata *u) {
282 pa_assert(u->use_tsched);
284 max_use = u->hwbuf_size - u->hwbuf_unused;
286 if (u->tsched_watermark > max_use - u->min_sleep)
287 u->tsched_watermark = max_use - u->min_sleep;
289 if (u->tsched_watermark < u->min_wakeup)
290 u->tsched_watermark = u->min_wakeup;
293 static void increase_watermark(struct userdata *u) {
294 size_t old_watermark;
295 pa_usec_t old_min_latency, new_min_latency;
298 pa_assert(u->use_tsched);
300 /* First, just try to increase the watermark */
301 old_watermark = u->tsched_watermark;
302 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
303 fix_tsched_watermark(u);
305 if (old_watermark != u->tsched_watermark) {
306 pa_log_info("Increasing wakeup watermark to %0.2f ms",
307 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
311 /* Hmm, we cannot increase the watermark any further, hence let's
312 raise the latency unless doing so was disabled in
314 if (u->fixed_latency_range)
317 old_min_latency = u->source->thread_info.min_latency;
318 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
319 new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
321 if (old_min_latency != new_min_latency) {
322 pa_log_info("Increasing minimal latency to %0.2f ms",
323 (double) new_min_latency / PA_USEC_PER_MSEC);
325 pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
328 /* When we reach this we're officialy fucked! */
331 static void decrease_watermark(struct userdata *u) {
332 size_t old_watermark;
336 pa_assert(u->use_tsched);
338 now = pa_rtclock_now();
340 if (u->watermark_dec_not_before <= 0)
343 if (u->watermark_dec_not_before > now)
346 old_watermark = u->tsched_watermark;
348 if (u->tsched_watermark < u->watermark_dec_step)
349 u->tsched_watermark = u->tsched_watermark / 2;
351 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
353 fix_tsched_watermark(u);
355 if (old_watermark != u->tsched_watermark)
356 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
357 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
359 /* We don't change the latency range*/
362 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
365 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
368 pa_assert(sleep_usec);
369 pa_assert(process_usec);
372 pa_assert(u->use_tsched);
374 usec = pa_source_get_requested_latency_within_thread(u->source);
376 if (usec == (pa_usec_t) -1)
377 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
379 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
384 *sleep_usec = usec - wm;
388 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
389 (unsigned long) (usec / PA_USEC_PER_MSEC),
390 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
391 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
395 static int try_recover(struct userdata *u, const char *call, int err) {
400 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
402 pa_assert(err != -EAGAIN);
405 pa_log_debug("%s: Buffer overrun!", call);
407 if (err == -ESTRPIPE)
408 pa_log_debug("%s: System suspended!", call);
410 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
411 pa_log("%s: %s", call, pa_alsa_strerror(err));
419 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
420 size_t left_to_record;
421 size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
422 pa_bool_t overrun = FALSE;
424 /* We use <= instead of < for this check here because an overrun
425 * only happens after the last sample was processed, not already when
426 * it is removed from the buffer. This is particularly important
427 * when block transfer is used. */
429 if (n_bytes <= rec_space)
430 left_to_record = rec_space - n_bytes;
433 /* We got a dropout. What a mess! */
441 if (pa_log_ratelimit(PA_LOG_INFO))
442 pa_log_info("Overrun!");
446 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
450 pa_bool_t reset_not_before = TRUE;
452 if (overrun || left_to_record < u->watermark_inc_threshold)
453 increase_watermark(u);
454 else if (left_to_record > u->watermark_dec_threshold) {
455 reset_not_before = FALSE;
457 /* We decrease the watermark only if have actually
458 * been woken up by a timeout. If something else woke
459 * us up it's too easy to fulfill the deadlines... */
462 decrease_watermark(u);
465 if (reset_not_before)
466 u->watermark_dec_not_before = 0;
469 return left_to_record;
472 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
473 pa_bool_t work_done = FALSE;
474 pa_usec_t max_sleep_usec = 0, process_usec = 0;
475 size_t left_to_record;
479 pa_source_assert_ref(u->source);
482 hw_sleep_time(u, &max_sleep_usec, &process_usec);
488 pa_bool_t after_avail = TRUE;
490 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
492 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
498 n_bytes = (size_t) n * u->frame_size;
501 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
504 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
509 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
511 pa_log_debug("Not reading, because too early.");
516 if (PA_UNLIKELY(n_bytes <= 0)) {
520 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
521 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
522 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
523 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
529 pa_log_debug("Not reading, because not necessary.");
537 pa_log_debug("Not filling up, because already too many iterations.");
546 pa_log_debug("Reading");
553 const snd_pcm_channel_area_t *areas;
554 snd_pcm_uframes_t offset, frames;
555 snd_pcm_sframes_t sframes;
557 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
558 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
560 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
562 if (!after_avail && err == -EAGAIN)
565 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
571 /* Make sure that if these memblocks need to be copied they will fit into one slot */
572 if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
573 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
575 if (!after_avail && frames == 0)
578 pa_assert(frames > 0);
581 /* Check these are multiples of 8 bit */
582 pa_assert((areas[0].first & 7) == 0);
583 pa_assert((areas[0].step & 7)== 0);
585 /* We assume a single interleaved memory buffer */
586 pa_assert((areas[0].first >> 3) == 0);
587 pa_assert((areas[0].step >> 3) == u->frame_size);
589 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
591 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
592 chunk.length = pa_memblock_get_length(chunk.memblock);
595 pa_source_post(u->source, &chunk);
596 pa_memblock_unref_fixed(chunk.memblock);
598 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
600 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
608 u->read_count += frames * u->frame_size;
611 pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
614 if ((size_t) frames * u->frame_size >= n_bytes)
617 n_bytes -= (size_t) frames * u->frame_size;
622 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
623 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
625 if (*sleep_usec > process_usec)
626 *sleep_usec -= process_usec;
631 return work_done ? 1 : 0;
634 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
635 int work_done = FALSE;
636 pa_usec_t max_sleep_usec = 0, process_usec = 0;
637 size_t left_to_record;
641 pa_source_assert_ref(u->source);
644 hw_sleep_time(u, &max_sleep_usec, &process_usec);
650 pa_bool_t after_avail = TRUE;
652 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
654 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
660 n_bytes = (size_t) n * u->frame_size;
661 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
666 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
669 if (PA_UNLIKELY(n_bytes <= 0)) {
673 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
674 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
675 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
676 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
686 pa_log_debug("Not filling up, because already too many iterations.");
696 snd_pcm_sframes_t frames;
699 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
701 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
703 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
704 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
706 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
708 p = pa_memblock_acquire(chunk.memblock);
709 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
710 pa_memblock_release(chunk.memblock);
712 if (PA_UNLIKELY(frames < 0)) {
713 pa_memblock_unref(chunk.memblock);
715 if (!after_avail && (int) frames == -EAGAIN)
718 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
724 if (!after_avail && frames == 0) {
725 pa_memblock_unref(chunk.memblock);
729 pa_assert(frames > 0);
733 chunk.length = (size_t) frames * u->frame_size;
735 pa_source_post(u->source, &chunk);
736 pa_memblock_unref(chunk.memblock);
740 u->read_count += frames * u->frame_size;
742 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
744 if ((size_t) frames * u->frame_size >= n_bytes)
747 n_bytes -= (size_t) frames * u->frame_size;
752 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
753 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
755 if (*sleep_usec > process_usec)
756 *sleep_usec -= process_usec;
761 return work_done ? 1 : 0;
764 static void update_smoother(struct userdata *u) {
765 snd_pcm_sframes_t delay = 0;
768 pa_usec_t now1 = 0, now2;
769 snd_pcm_status_t *status;
770 snd_htimestamp_t htstamp = { 0, 0 };
772 snd_pcm_status_alloca(&status);
775 pa_assert(u->pcm_handle);
777 /* Let's update the time smoother */
779 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
780 pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
784 snd_pcm_status_get_htstamp(status, &htstamp);
785 now1 = pa_timespec_load(&htstamp);
787 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
789 now1 = pa_rtclock_now();
791 /* check if the time since the last update is bigger than the interval */
792 if (u->last_smoother_update > 0)
793 if (u->last_smoother_update + u->smoother_interval > now1)
796 position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
797 now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
799 pa_smoother_put(u->smoother, now1, now2);
801 u->last_smoother_update = now1;
802 /* exponentially increase the update interval up to the MAX limit */
803 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
806 static pa_usec_t source_get_latency(struct userdata *u) {
808 pa_usec_t now1, now2;
812 now1 = pa_rtclock_now();
813 now2 = pa_smoother_get(u->smoother, now1);
815 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
817 return delay >= 0 ? (pa_usec_t) delay : 0;
820 static int build_pollfd(struct userdata *u) {
822 pa_assert(u->pcm_handle);
824 if (u->alsa_rtpoll_item)
825 pa_rtpoll_item_free(u->alsa_rtpoll_item);
827 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
833 /* Called from IO context */
834 static int suspend(struct userdata *u) {
836 pa_assert(u->pcm_handle);
838 pa_smoother_pause(u->smoother, pa_rtclock_now());
841 snd_pcm_close(u->pcm_handle);
842 u->pcm_handle = NULL;
844 if (u->alsa_rtpoll_item) {
845 pa_rtpoll_item_free(u->alsa_rtpoll_item);
846 u->alsa_rtpoll_item = NULL;
849 pa_log_info("Device suspended...");
854 /* Called from IO context */
855 static int update_sw_params(struct userdata *u) {
856 snd_pcm_uframes_t avail_min;
861 /* Use the full buffer if no one asked us for anything specific */
867 if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
870 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
872 b = pa_usec_to_bytes(latency, &u->source->sample_spec);
874 /* We need at least one sample in our buffer */
876 if (PA_UNLIKELY(b < u->frame_size))
879 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
882 fix_min_sleep_wakeup(u);
883 fix_tsched_watermark(u);
886 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
891 pa_usec_t sleep_usec, process_usec;
893 hw_sleep_time(u, &sleep_usec, &process_usec);
894 avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
897 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
899 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
900 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
907 /* Called from IO Context on unsuspend or from main thread when creating source */
908 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
911 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
912 &u->source->sample_spec);
914 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
915 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
917 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
918 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
920 fix_min_sleep_wakeup(u);
921 fix_tsched_watermark(u);
924 pa_source_set_latency_range_within_thread(u->source,
926 pa_bytes_to_usec(u->hwbuf_size, ss));
928 pa_source_set_latency_range(u->source,
930 pa_bytes_to_usec(u->hwbuf_size, ss));
932 /* work-around assert in pa_source_set_latency_within_thead,
933 keep track of min_latency and reuse it when
934 this routine is called from IO context */
935 u->min_latency_ref = u->source->thread_info.min_latency;
938 pa_log_info("Time scheduling watermark is %0.2fms",
939 (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
942 /* Called from IO context */
943 static int unsuspend(struct userdata *u) {
947 snd_pcm_uframes_t period_size, buffer_size;
950 pa_assert(!u->pcm_handle);
952 pa_log_info("Trying resume...");
954 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
956 SND_PCM_NO_AUTO_RESAMPLE|
957 SND_PCM_NO_AUTO_CHANNELS|
958 SND_PCM_NO_AUTO_FORMAT)) < 0) {
959 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
963 ss = u->source->sample_spec;
964 period_size = u->fragment_size / u->frame_size;
965 buffer_size = u->hwbuf_size / u->frame_size;
969 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
970 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
974 if (b != u->use_mmap || d != u->use_tsched) {
975 pa_log_warn("Resume failed, couldn't get original access mode.");
979 if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
980 pa_log_warn("Resume failed, couldn't restore original sample settings.");
984 if (period_size*u->frame_size != u->fragment_size ||
985 buffer_size*u->frame_size != u->hwbuf_size) {
986 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
987 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
988 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
992 if (update_sw_params(u) < 0)
995 if (build_pollfd(u) < 0)
998 /* FIXME: We need to reload the volume somehow */
1001 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1002 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1003 u->last_smoother_update = 0;
1007 /* reset the watermark to the value defined when source was created */
1009 reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, TRUE);
1011 pa_log_info("Resumed successfully...");
1016 if (u->pcm_handle) {
1017 snd_pcm_close(u->pcm_handle);
1018 u->pcm_handle = NULL;
1024 /* Called from IO context */
1025 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1026 struct userdata *u = PA_SOURCE(o)->userdata;
1030 case PA_SOURCE_MESSAGE_GET_LATENCY: {
1034 r = source_get_latency(u);
1036 *((pa_usec_t*) data) = r;
1041 case PA_SOURCE_MESSAGE_SET_STATE:
1043 switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
1045 case PA_SOURCE_SUSPENDED: {
1048 pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1050 if ((r = suspend(u)) < 0)
1056 case PA_SOURCE_IDLE:
1057 case PA_SOURCE_RUNNING: {
1060 if (u->source->thread_info.state == PA_SOURCE_INIT) {
1061 if (build_pollfd(u) < 0)
1065 if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1066 if ((r = unsuspend(u)) < 0)
1073 case PA_SOURCE_UNLINKED:
1074 case PA_SOURCE_INIT:
1075 case PA_SOURCE_INVALID_STATE:
1082 return pa_source_process_msg(o, code, data, offset, chunk);
1085 /* Called from main context */
1086 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1087 pa_source_state_t old_state;
1090 pa_source_assert_ref(s);
1091 pa_assert_se(u = s->userdata);
1093 old_state = pa_source_get_state(u->source);
1095 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1097 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1098 if (reserve_init(u, u->device_name) < 0)
1099 return -PA_ERR_BUSY;
1104 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1105 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1108 pa_assert(u->mixer_handle);
1110 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1113 if (!PA_SOURCE_IS_LINKED(u->source->state))
1116 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1117 pa_source_set_mixer_dirty(u->source, TRUE);
1121 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1122 pa_source_get_volume(u->source, TRUE);
1123 pa_source_get_mute(u->source, TRUE);
1129 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1130 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1133 pa_assert(u->mixer_handle);
1135 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1138 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1139 pa_source_set_mixer_dirty(u->source, TRUE);
1143 if (mask & SND_CTL_EVENT_MASK_VALUE)
1144 pa_source_update_volume_and_mute(u->source);
1149 static void source_get_volume_cb(pa_source *s) {
1150 struct userdata *u = s->userdata;
1152 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1155 pa_assert(u->mixer_path);
1156 pa_assert(u->mixer_handle);
1158 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1161 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1162 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1164 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1166 if (u->mixer_path->has_dB) {
1167 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1169 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1172 if (pa_cvolume_equal(&u->hardware_volume, &r))
1175 s->real_volume = u->hardware_volume = r;
1177 /* Hmm, so the hardware volume changed, let's reset our software volume */
1178 if (u->mixer_path->has_dB)
1179 pa_source_set_soft_volume(s, NULL);
1182 static void source_set_volume_cb(pa_source *s) {
1183 struct userdata *u = s->userdata;
1185 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1186 pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1189 pa_assert(u->mixer_path);
1190 pa_assert(u->mixer_handle);
1192 /* Shift up by the base volume */
1193 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1195 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1198 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1199 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1201 u->hardware_volume = r;
1203 if (u->mixer_path->has_dB) {
1204 pa_cvolume new_soft_volume;
1205 pa_bool_t accurate_enough;
1206 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1208 /* Match exactly what the user requested by software */
1209 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1211 /* If the adjustment to do in software is only minimal we
1212 * can skip it. That saves us CPU at the expense of a bit of
1215 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1216 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1218 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1219 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1220 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1221 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1222 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1223 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1224 pa_yes_no(accurate_enough));
1225 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1227 if (!accurate_enough)
1228 s->soft_volume = new_soft_volume;
1231 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1233 /* We can't match exactly what the user requested, hence let's
1234 * at least tell the user about it */
1240 static void source_write_volume_cb(pa_source *s) {
1241 struct userdata *u = s->userdata;
1242 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1245 pa_assert(u->mixer_path);
1246 pa_assert(u->mixer_handle);
1247 pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1249 /* Shift up by the base volume */
1250 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1252 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1253 pa_log_error("Writing HW volume failed");
1256 pa_bool_t accurate_enough;
1258 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1259 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1261 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1263 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1264 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1266 if (!accurate_enough) {
1268 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1269 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1272 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1273 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1274 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1275 pa_log_debug(" in dB: %s (request) != %s",
1276 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1277 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1282 static void source_get_mute_cb(pa_source *s) {
1283 struct userdata *u = s->userdata;
1287 pa_assert(u->mixer_path);
1288 pa_assert(u->mixer_handle);
1290 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1296 static void source_set_mute_cb(pa_source *s) {
1297 struct userdata *u = s->userdata;
1300 pa_assert(u->mixer_path);
1301 pa_assert(u->mixer_handle);
1303 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
/* Wire up (or clear) the volume and mute callbacks of u->source according to
 * the capabilities of the currently selected mixer path, and initialize the
 * source's base volume / step count.
 *
 * Falls back to software volume/mute when the path offers none; enables
 * decibel-based volume (and deferred volume writes, if configured) when the
 * path has a dB scale.
 * NOTE(review): several lines (asserts, else branches, closing braces) are
 * missing from this view; the pairing of if/else arms below is inferred from
 * the log messages. */
1306 static void mixer_volume_init(struct userdata *u) {
1309 if (!u->mixer_path->has_volume) {
/* No hardware volume: remove all volume callbacks so the core applies
 * volume in software. */
1310 pa_source_set_write_volume_callback(u->source, NULL);
1311 pa_source_set_get_volume_callback(u->source, NULL);
1312 pa_source_set_set_volume_callback(u->source, NULL);
1314 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1316 pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1317 pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
/* Deferred volume (write applied from the I/O thread at a safe time)
 * requires a dB-capable mixer path. */
1319 if (u->mixer_path->has_dB && u->deferred_volume) {
1320 pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1321 pa_log_info("Successfully enabled deferred volume.");
1323 pa_source_set_write_volume_callback(u->source, NULL);
1325 if (u->mixer_path->has_dB) {
1326 pa_source_enable_decibel_volume(u->source, TRUE);
1327 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
/* Base volume is chosen so that the path's max_dB maps to PA_VOLUME_NORM
 * (0 dB attenuation in PA terms). */
1329 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1330 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1332 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
/* No dB information: fall back to raw integer volume range. */
1334 pa_source_enable_decibel_volume(u->source, FALSE);
1335 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1337 u->source->base_volume = PA_VOLUME_NORM;
1338 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1341 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
/* Mute handling mirrors the volume handling above. */
1344 if (!u->mixer_path->has_mute) {
1345 pa_source_set_get_mute_callback(u->source, NULL);
1346 pa_source_set_set_mute_callback(u->source, NULL);
1347 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1349 pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1350 pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1351 pa_log_info("Using hardware mute control.");
/* pa_source "set port" callback for UCM-managed devices: delegates the port
 * switch to the UCM context. Returns the result of pa_alsa_ucm_set_port()
 * (negative on failure). The FALSE argument distinguishes the capture
 * direction here — presumably "is_sink = FALSE"; confirm against the
 * pa_alsa_ucm_set_port() prototype. */
1355 static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
1356 struct userdata *u = s->userdata;
1360 pa_assert(u->ucm_context);  /* this callback is only installed for UCM devices */
1362 return pa_alsa_ucm_set_port(u->ucm_context, p, FALSE);
/* pa_source "set port" callback for mixer-path based devices: activates the
 * ALSA mixer path stored in the port's data, re-initializes the volume/mute
 * callbacks for the new path, and (when deferred volume is active) deals with
 * the write_volume callback.
 * NOTE(review): the asserts on s/p, the tail of the deferred-volume branch
 * and the return statement are not visible in this view. */
1365 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1366 struct userdata *u = s->userdata;
1367 pa_alsa_port_data *data;
1371 pa_assert(u->mixer_handle);
1373 data = PA_DEVICE_PORT_DATA(p);  /* per-port payload attached at port creation */
/* The port's path becomes the active mixer path; assert-assign documents
 * that a port always carries a non-NULL path. */
1375 pa_assert_se(u->mixer_path = data->path);
1376 pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
/* New path may have different volume/mute capabilities — re-register. */
1378 mixer_volume_init(u);
1382 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1383 if (s->write_volume)
/* pa_source callback invoked when a client changes its requested latency:
 * recomputes the software parameters (watermarks etc.) accordingly.
 * Only meaningful in timer-based scheduling mode, hence the assert. */
1393 static void source_update_requested_latency_cb(pa_source *s) {
1394 struct userdata *u = s->userdata;
1396 pa_assert(u->use_tsched); /* only when timer scheduling is used
1397 * we can dynamically adjust the
1403 update_sw_params(u);
/* pa_source "update rate" callback: checks whether the requested sample rate
 * is in the device's supported-rate list (u->rates, NULL/0-terminated) and,
 * if the source is not currently opened, switches the sample spec to it.
 * Returns TRUE on success, FALSE if the rate is unsupported (tails of both
 * branches are not visible in this view).
 * NOTE(review): the log message says "Sink" although this is the source
 * module — looks like a copy/paste from alsa-sink.c; changing the runtime
 * string is out of scope for a comment-only edit. */
1406 static pa_bool_t source_update_rate_cb(pa_source *s, uint32_t rate)
1408 struct userdata *u = s->userdata;
1410 pa_bool_t supported = FALSE;
/* Scan the zero-terminated list of rates probed at device open time. */
1414 for (i = 0; u->rates[i]; i++) {
1415 if (u->rates[i] == rate) {
1422 pa_log_info("Sink does not support sample rate of %d Hz", rate);
/* Only change the rate while the device is not actively capturing. */
1426 if (!PA_SOURCE_IS_OPENED(s->state)) {
1427 pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1428 u->source->sample_spec.rate = rate;
/* I/O thread main loop. Responsibilities, per iteration:
 *   1. if the source is opened, read audio from ALSA (mmap or unix read)
 *      and post it downstream;
 *   2. in tsched mode, compute the next wakeup time from the card clock,
 *      translated to the system clock via the smoother;
 *   3. apply any pending deferred volume changes;
 *   4. sleep in pa_rtpoll_run() until a timer or poll event fires;
 *   5. ask ALSA which poll events occurred and recover from errors.
 * Exits via the shutdown path at the bottom, which asks the main thread to
 * unload the module and then drains messages until PA_MESSAGE_SHUTDOWN.
 * NOTE(review): loop braces, several conditions and error-exit lines are
 * missing from this view; statement order of the visible lines is preserved
 * verbatim. */
1435 static void thread_func(void *userdata) {
1436 struct userdata *u = userdata;
1437 unsigned short revents = 0;  /* poll revents from the previous iteration */
1441 pa_log_debug("Thread starting up");
/* Elevate to RT priority if the daemon is configured for it. */
1443 if (u->core->realtime_scheduling)
1444 pa_make_realtime(u->core->realtime_priority);
/* Make this thread's message queue current so async messages reach us. */
1446 pa_thread_mq_install(&u->thread_mq);
1450 pa_usec_t rtpoll_sleep = 0, real_sleep;
1453 pa_log_debug("Loop");
1456 /* Read some data and pass it to the sources */
1457 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1459 pa_usec_t sleep_usec = 0;
1460 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
/* First pass after resume: kick off the capture stream and resume the
 * clock smoother (surrounding condition not visible here). */
1463 pa_log_info("Starting capture.");
1464 snd_pcm_start(u->pcm_handle);
1466 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
/* mmap vs. read() transfer path, chosen at device open time. */
1472 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1474 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1479 /* pa_log_debug("work_done = %i", work_done); */
1484 if (u->use_tsched) {
1487 /* OK, the capture buffer is now empty, let's
1488 * calculate when to wake up next */
1490 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1492 /* Convert from the sound card time domain to the
1493 * system time domain */
1494 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1496 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1498 /* We don't trust the conversion, so we wake up whatever comes first */
1499 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
/* A pending deferred volume write may need to wake us earlier than the
 * capture timer does. */
1503 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1504 pa_usec_t volume_sleep;
1505 pa_source_volume_change_apply(u->source, &volume_sleep);
1506 if (volume_sleep > 0) {
1507 if (rtpoll_sleep > 0)
1508 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1510 rtpoll_sleep = volume_sleep;
1514 if (rtpoll_sleep > 0) {
1515 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1516 real_sleep = pa_rtclock_now();  /* timestamp to measure actual sleep below */
1519 pa_rtpoll_set_timer_disabled(u->rtpoll);
1521 /* Hmm, nothing to do. Let's sleep */
1522 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
/* Compare requested vs. actual sleep to detect scheduling delays. */
1525 if (rtpoll_sleep > 0) {
1526 real_sleep = pa_rtclock_now() - real_sleep;
1528 pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
1529 (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
1530 (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
1532 if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark)
1533 pa_log_info("Scheduling delay of %0.2fms, you might want to investigate this to improve latency...",
1534 (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC);
/* Apply any volume change that became due while we slept. */
1537 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
1538 pa_source_volume_change_apply(u->source, NULL);
1543 /* Tell ALSA about this and process its response */
1544 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1545 struct pollfd *pollfd;
1549 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
/* Translate raw pollfd revents into PCM-level events. */
1551 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1552 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
/* Anything besides POLLIN (POLLERR etc.) means the stream needs recovery. */
1556 if (revents & ~POLLIN) {
1557 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1562 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1563 pa_log_debug("Wakeup from ALSA!");
1570 /* If this was no regular exit from the loop we have to continue
1571 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1572 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1573 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1576 pa_log_debug("Thread shutting down");
/* Decide the new source's registry name, in priority order:
 *   1. explicit "source_name" module argument (namereg_fail = TRUE: fail if
 *      the name is taken rather than mangling it);
 *   2. the "name" module argument (same strict behavior);
 *   3. a synthesized "alsa_input.<device>[.<mapping>]" name (namereg_fail =
 *      FALSE: the registry may uniquify it).
 * NOTE(review): early returns / frees between the branches are not visible
 * in this view. */
1579 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1585 pa_assert(device_name);
1587 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1588 pa_source_new_data_set_name(data, n);
1589 data->namereg_fail = TRUE;
1593 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1594 data->namereg_fail = TRUE;
/* Fall back to a name derived from the device (id preferred over name). */
1596 n = device_id ? device_id : device_name;
1597 data->namereg_fail = FALSE;
1601 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1603 t = pa_sprintf_malloc("alsa_input.%s", n);
1605 pa_source_new_data_set_name(data, t);
/* Locate and probe the ALSA mixer for the opened PCM. If an explicit
 * "control" element was given, synthesize a single input path for it;
 * otherwise use the mapping's probed input path set. On any failure the
 * function cleans up u->mixer_path / u->mixer_handle and leaves the source
 * without hardware mixer support (software volume/mute will be used).
 * NOTE(review): the success-return and the 'fail:' label between the probe
 * logic and the cleanup below are not visible in this view. */
1609 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
/* Nothing to do when there is neither a mapping nor an explicit element. */
1612 if (!mapping && !element)
1615 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1616 pa_log_info("Failed to find a working mixer device.");
/* Explicit "control=" element: build a one-element path by hand. */
1622 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1625 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1628 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1629 pa_alsa_path_dump(u->mixer_path);
1630 } else if (!(u->mixer_path_set = mapping->input_path_set))
/* Failure cleanup: release whatever was acquired above. */
1637 if (u->mixer_path) {
1638 pa_alsa_path_free(u->mixer_path);
1639 u->mixer_path = NULL;
1642 if (u->mixer_handle) {
1643 snd_mixer_close(u->mixer_handle);
1644 u->mixer_handle = NULL;
/* Activate the mixer path belonging to the source's active port (or the only
 * available path), initialize volume/mute callbacks, and register change
 * callbacks so external mixer manipulation is picked up: via the rtpoll in
 * the I/O thread for deferred volume, or via an fdlist on the main loop
 * otherwise. Returns 0 when there is nothing to set up; error returns from
 * the callback-registration failures are not visible in this view. */
1648 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1649 pa_bool_t need_mixer_callback = FALSE;
/* No mixer found earlier — nothing to do. */
1653 if (!u->mixer_handle)
1656 if (u->source->active_port) {
1657 pa_alsa_port_data *data;
1659 /* We have a list of supported paths, so let's activate the
1660 * one that has been chosen as active */
1662 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1663 u->mixer_path = data->path;
1665 pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);
/* No active port: fall back to the first (possibly only) path. */
1669 if (!u->mixer_path && u->mixer_path_set)
1670 u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1672 if (u->mixer_path) {
1673 /* Hmm, we have only a single path, then let's activate it */
1675 pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
1680 mixer_volume_init(u);
1682 /* Will we need to register callbacks? */
1683 if (u->mixer_path_set && u->mixer_path_set->paths) {
1687 PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1688 if (p->has_volume || p->has_mute)
1689 need_mixer_callback = TRUE;
1692 else if (u->mixer_path)
1693 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1695 if (need_mixer_callback) {
1696 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
/* Deferred volume: mixer events are handled in the I/O thread via rtpoll. */
1697 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1698 u->mixer_pd = pa_alsa_mixer_pdata_new();
1699 mixer_callback = io_mixer_callback;
1701 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1702 pa_log("Failed to initialize file descriptor monitoring");
/* Otherwise: mixer events are handled on the main loop via an fdlist. */
1706 u->mixer_fdl = pa_alsa_fdlist_new();
1707 mixer_callback = ctl_mixer_callback;
1709 if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1710 pa_log("Failed to initialize file descriptor monitoring");
/* Attach the chosen callback to all paths, or to the single path. */
1715 if (u->mixer_path_set)
1716 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1718 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
/* Construct a new ALSA capture source.
 *
 * Steps: parse module arguments (sample spec, buffer metrics, mmap/tsched/
 * dB/deferred-volume switches) -> allocate and initialize the userdata ->
 * open the PCM device (by UCM mapping, by device_id with auto-profile, or by
 * plain device string) -> probe rates and the mixer -> create the pa_source
 * object with its proplist and ports -> install callbacks -> configure
 * buffer/watermark parameters -> activate the mixer -> spawn the I/O thread
 * -> apply/read initial volume and mute -> pa_source_put().
 *
 * Returns the new pa_source, or NULL on failure (the 'fail:' cleanup path is
 * not visible in this view; several asserts, goto targets and closing braces
 * are also missing — the visible statements are preserved verbatim). */
1724 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1726 struct userdata *u = NULL;
1727 const char *dev_id = NULL, *key, *mod_name;
1729 uint32_t alternate_sample_rate;
1731 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1732 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1734 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, fixed_latency_range = FALSE;
1735 pa_source_new_data data;
1736 pa_alsa_profile_set *profile_set = NULL;
/* --- Parse sample spec / channel map / alternate rate ------------------ */
1742 ss = m->core->default_sample_spec;
1743 map = m->core->default_channel_map;
1744 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1745 pa_log("Failed to parse sample specification and channel map");
1749 alternate_sample_rate = m->core->alternate_sample_rate;
1750 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1751 pa_log("Failed to parse alternate sample rate");
/* --- Compute buffer metrics (bytes and frames) ------------------------- */
1755 frame_size = pa_frame_size(&ss);
1757 nfrags = m->core->default_n_fragments;
1758 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1760 frag_size = (uint32_t) frame_size;  /* minimum fragment: one frame (condition not visible) */
1761 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1762 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1764 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1765 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1766 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1767 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1768 pa_log("Failed to parse buffer metrics");
1772 buffer_size = nfrags * frag_size;
1774 period_frames = frag_size/frame_size;
1775 buffer_frames = buffer_size/frame_size;
1776 tsched_frames = tsched_size/frame_size;
/* --- Boolean feature switches ------------------------------------------ */
1778 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1779 pa_log("Failed to parse mmap argument.");
1783 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1784 pa_log("Failed to parse tsched argument.");
1788 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1789 pa_log("Failed to parse ignore_dB argument.");
1793 deferred_volume = m->core->deferred_volume;
1794 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1795 pa_log("Failed to parse deferred_volume argument.");
1799 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
1800 pa_log("Failed to parse fixed_latency_range argument.");
/* tsched may be vetoed globally (e.g. no high-resolution timers). */
1804 use_tsched = pa_alsa_may_tsched(use_tsched);
/* --- Allocate and initialize userdata ---------------------------------- */
1806 u = pa_xnew0(struct userdata, 1);
1809 u->use_mmap = use_mmap;
1810 u->use_tsched = use_tsched;
1811 u->deferred_volume = deferred_volume;
1812 u->fixed_latency_range = fixed_latency_range;
1814 u->rtpoll = pa_rtpoll_new();
1815 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
/* Smoother translates between sound-card and system clock domains. */
1817 u->smoother = pa_smoother_new(
1818 SMOOTHER_ADJUST_USEC,
1819 SMOOTHER_WINDOW_USEC,
1825 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1828 if (mapping && mapping->ucm_context.ucm)
1829 u->ucm_context = &mapping->ucm_context;
1831 dev_id = pa_modargs_get_value(
1833 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1835 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
/* Device reservation (D-Bus) so other audio users can coordinate access. */
1837 if (reserve_init(u, dev_id) < 0)
1840 if (reserve_monitor_init(u, dev_id) < 0)
/* --- Open the PCM device: UCM mapping / device_id auto / plain string -- */
1848 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1849 pa_log("device_id= not set");
/* Enable an optional UCM modifier attached to the mapping. */
1853 if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
1854 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
1855 pa_log("Failed to enable ucm modifier %s", mod_name);
1857 pa_log_debug("Enabled ucm modifier %s", mod_name);
1860 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1864 SND_PCM_STREAM_CAPTURE,
1865 &period_frames, &buffer_frames, tsched_frames,
1869 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1871 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1874 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1878 SND_PCM_STREAM_CAPTURE,
1879 &period_frames, &buffer_frames, tsched_frames,
1880 &b, &d, profile_set, &mapping)))
1885 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1886 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1889 SND_PCM_STREAM_CAPTURE,
1890 &period_frames, &buffer_frames, tsched_frames,
1895 pa_assert(u->device_name);
1896 pa_log_info("Successfully opened device %s.", u->device_name);
/* Modems expose PCM devices too; refuse them so we don't capture noise. */
1898 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1899 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1904 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
/* 'b' = mmap supported by the device, 'd' = tsched usable (set by the
 * open helpers above). Downgrade our requested modes accordingly. */
1906 if (use_mmap && !b) {
1907 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1908 u->use_mmap = use_mmap = FALSE;
1911 if (use_tsched && (!b || !d)) {
1912 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1913 u->use_tsched = use_tsched = FALSE;
1917 pa_log_info("Successfully enabled mmap() mode.");
1919 if (u->use_tsched) {
1920 pa_log_info("Successfully enabled timer-based scheduling mode.");
1921 if (u->fixed_latency_range)
1922 pa_log_info("Disabling latency range changes on overrun");
1925 u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
1927 pa_log_error("Failed to find any supported sample rates.");
1931 /* ALSA might tweak the sample spec, so recalculate the frame size */
1932 frame_size = pa_frame_size(&ss);
1934 if (!u->ucm_context)
1935 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
/* --- Build the pa_source_new_data (name, proplist, ports) -------------- */
1937 pa_source_new_data_init(&data);
1938 data.driver = driver;
1941 set_source_name(&data, ma, dev_id, u->device_name, mapping);
1943 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1944 * variable instead of using &data.namereg_fail directly, because
1945 * data.namereg_fail is a bitfield and taking the address of a bitfield
1946 * variable is impossible. */
1947 namereg_fail = data.namereg_fail;
1948 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1949 pa_log("Failed to parse namereg_fail argument.");
1950 pa_source_new_data_done(&data);
1953 data.namereg_fail = namereg_fail;
1955 pa_source_new_data_set_sample_spec(&data, &ss);
1956 pa_source_new_data_set_channel_map(&data, &map);
1957 pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
1959 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1960 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1961 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1962 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1963 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1966 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1967 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
/* Copy all mapping-supplied properties into the source's proplist. */
1969 while ((key = pa_proplist_iterate(mapping->proplist, &state)))
1970 pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
1973 pa_alsa_init_description(data.proplist);
1975 if (u->control_device)
1976 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1978 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1979 pa_log("Invalid properties");
1980 pa_source_new_data_done(&data);
/* Ports come from UCM or from the probed mixer path set. */
1985 pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, FALSE, card);
1986 else if (u->mixer_path_set)
1987 pa_alsa_add_ports(&data, u->mixer_path_set, card);
/* --- Create the source object and install callbacks -------------------- */
1989 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1990 pa_source_new_data_done(&data);
1993 pa_log("Failed to create source object");
1997 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
1998 &u->source->thread_info.volume_change_safety_margin) < 0) {
1999 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2003 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2004 &u->source->thread_info.volume_change_extra_delay) < 0) {
2005 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2009 u->source->parent.process_msg = source_process_msg;
2011 u->source->update_requested_latency = source_update_requested_latency_cb;
2012 u->source->set_state = source_set_state_cb;
2014 u->source->set_port = source_set_port_ucm_cb;
2016 u->source->set_port = source_set_port_cb;
2017 if (u->source->alternate_sample_rate)
2018 u->source->update_rate = source_update_rate_cb;
2019 u->source->userdata = u;
2021 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
2022 pa_source_set_rtpoll(u->source, u->rtpoll);
/* --- Final buffer/latency configuration -------------------------------- */
2024 u->frame_size = frame_size;
2025 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2026 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2027 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
2029 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2030 (double) u->hwbuf_size / (double) u->fragment_size,
2031 (long unsigned) u->fragment_size,
2032 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2033 (long unsigned) u->hwbuf_size,
2034 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2036 if (u->use_tsched) {
2037 u->tsched_watermark_ref = tsched_watermark;
2038 reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
/* Non-tsched mode: latency is fixed at the hardware buffer length. */
2041 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
2045 if (update_sw_params(u) < 0)
2048 if (u->ucm_context) {
2049 if (u->source->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, FALSE) < 0)
2051 } else if (setup_mixer(u, ignore_dB) < 0)
2054 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
/* --- Start the I/O thread and publish the source ----------------------- */
2056 if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
2057 pa_log("Failed to create thread.");
2061 /* Get initial mixer settings */
2062 if (data.volume_is_set) {
2063 if (u->source->set_volume)
2064 u->source->set_volume(u->source);
2066 if (u->source->get_volume)
2067 u->source->get_volume(u->source);
2070 if (data.muted_is_set) {
2071 if (u->source->set_mute)
2072 u->source->set_mute(u->source);
2074 if (u->source->get_mute)
2075 u->source->get_mute(u->source);
2078 if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
2079 u->source->write_volume(u->source);
2081 pa_source_put(u->source);
2084 pa_alsa_profile_set_free(profile_set);
/* Second profile_set free below belongs to the (hidden) 'fail:' path. */
2094 pa_alsa_profile_set_free(profile_set);
/* Tear down everything pa_alsa_source_new() built, in reverse dependency
 * order: unlink the source, shut down and join the I/O thread, release the
 * thread message queue, drop the source reference, then free mixer, rtpoll,
 * PCM handle, smoother and owned strings. Each step is guarded (guards for
 * some steps are on lines not visible in this view).
 * NOTE(review): the final pa_xfree(u) and reserve-wrapper releases are not
 * visible here — presumably they follow; confirm against the full file. */
2099 static void userdata_free(struct userdata *u) {
2103 pa_source_unlink(u->source);
/* Ask the I/O thread to exit, then join it. */
2106 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2107 pa_thread_free(u->thread);
2110 pa_thread_mq_done(&u->thread_mq);
2113 pa_source_unref(u->source);
2116 pa_alsa_mixer_pdata_free(u->mixer_pd);
2118 if (u->alsa_rtpoll_item)
2119 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2122 pa_rtpoll_free(u->rtpoll);
2124 if (u->pcm_handle) {
2125 snd_pcm_drop(u->pcm_handle);   /* discard pending capture data before closing */
2126 snd_pcm_close(u->pcm_handle);
2130 pa_alsa_fdlist_free(u->mixer_fdl);
/* A path owned by a path set is freed with the set, not here. */
2132 if (u->mixer_path && !u->mixer_path_set)
2133 pa_alsa_path_free(u->mixer_path);
2135 if (u->mixer_handle)
2136 snd_mixer_close(u->mixer_handle);
2139 pa_smoother_free(u->smoother);
2147 pa_xfree(u->device_name);
2148 pa_xfree(u->control_device);
2149 pa_xfree(u->paths_dir);
2153 void pa_alsa_source_free(pa_source *s) {
2156 pa_source_assert_ref(s);
2157 pa_assert_se(u = s->userdata);