2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 #include <asoundlib.h>
31 #include <pulse/i18n.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/volume.h>
35 #include <pulse/xmalloc.h>
37 #include <pulsecore/core.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-rtclock.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/thread.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/time-smoother.h>
52 #include <modules/reserve-wrap.h>
54 #include "alsa-util.h"
55 #include "alsa-source.h"
57 /* #define DEBUG_TIMING */
59 #define DEFAULT_DEVICE "default"
61 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
62 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
64 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
65 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
66 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
67 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
68 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
69 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
71 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
72 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
74 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
75 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
77 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
78 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
80 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
/* Per-module instance state (interior of `struct userdata`).
 * NOTE(review): the struct's opening/closing lines and several members are
 * not visible in this excerpt; only the declarations below are shown. */
88 pa_thread_mq thread_mq;
/* ALSA PCM capture handle; NULL while the device is suspended. */
91 snd_pcm_t *pcm_handle;
/* Mixer (hardware volume/mute) plumbing. */
93 pa_alsa_fdlist *mixer_fdl;
94 pa_alsa_mixer_pdata *mixer_pd;
95 snd_mixer_t *mixer_handle;
96 pa_alsa_path_set *mixer_path_set;
97 pa_alsa_path *mixer_path;
/* Last volume actually written to / read from the hardware. */
99 pa_cvolume hardware_volume;
/* Thresholds steering dynamic watermark adjustment (see
 * increase_watermark()/decrease_watermark()). */
111 watermark_inc_threshold,
112 watermark_dec_threshold;
/* Earliest time at which the watermark may be decreased again. */
114 pa_usec_t watermark_dec_not_before;
116 char *device_name; /* name of the PCM device */
117 char *control_device; /* name of the control device */
/* Feature flags chosen at setup time. */
119 pa_bool_t use_mmap:1, use_tsched:1, sync_volume:1;
123 pa_rtpoll_item *alsa_rtpoll_item;
125 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
/* Smoother translating sound-card time to system time, plus its
 * exponentially growing update interval. */
127 pa_smoother *smoother;
129 pa_usec_t smoother_interval;
130 pa_usec_t last_smoother_update;
/* Device-reservation (rtkit/reserve-device) wrappers and hook slots. */
132 pa_reserve_wrapper *reserve;
133 pa_hook_slot *reserve_slot;
134 pa_reserve_monitor_wrapper *monitor;
135 pa_hook_slot *monitor_slot;
138 static void userdata_free(struct userdata *u);
140 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
/* Device-reservation hook: another application claimed the device, so
 * suspend our source. Cancels the hook chain if suspending fails.
 * (Intermediate lines elided in this excerpt.) */
144 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
145 return PA_HOOK_CANCEL;
150 static void reserve_done(struct userdata *u) {
/* Tear down the device-reservation hook slot and drop our reference on
 * the reserve wrapper. */
153 if (u->reserve_slot) {
154 pa_hook_slot_free(u->reserve_slot);
155 u->reserve_slot = NULL;
/* (The matching NULL-check for u->reserve is elided in this excerpt.) */
159 pa_reserve_wrapper_unref(u->reserve);
164 static void reserve_update(struct userdata *u) {
/* Publish the source's human-readable description as the application
 * device name on the reservation, if both exist. */
165 const char *description;
168 if (!u->source || !u->reserve)
171 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
172 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
175 static int reserve_init(struct userdata *u, const char *dname) {
/* Acquire the session reservation for device `dname` and hook up
 * reserve_cb(). Skipped entirely in system mode, where per-session
 * device reservation does not apply. */
184 if (pa_in_system_mode())
187 if (!(rname = pa_alsa_get_reserve_name(dname)))
190 /* We are resuming, try to lock the device */
191 u->reserve = pa_reserve_wrapper_get(u->core, rname);
199 pa_assert(!u->reserve_slot);
200 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
205 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
/* Reservation-monitor hook: suspend while someone else holds the device
 * busy, resume when it becomes free. Only honoured when we do not hold
 * the reservation ourselves (u->reserve == NULL). */
211 b = PA_PTR_TO_UINT(busy) && !u->reserve;
213 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
217 static void monitor_done(struct userdata *u) {
/* Tear down the reservation-monitor hook slot and drop our reference on
 * the monitor wrapper. */
220 if (u->monitor_slot) {
221 pa_hook_slot_free(u->monitor_slot);
222 u->monitor_slot = NULL;
/* (The matching NULL-check for u->monitor is elided in this excerpt.) */
226 pa_reserve_monitor_wrapper_unref(u->monitor);
231 static int reserve_monitor_init(struct userdata *u, const char *dname) {
/* Watch the reservation state of device `dname` and hook up
 * monitor_cb(). Mirrors reserve_init(); skipped in system mode. */
237 if (pa_in_system_mode())
240 if (!(rname = pa_alsa_get_reserve_name(dname)))
243 /* We are resuming, try to lock the device */
244 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
250 pa_assert(!u->monitor_slot);
251 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
256 static void fix_min_sleep_wakeup(struct userdata *u) {
/* Clamp the timer-scheduling minimum sleep and wakeup margins so each
 * lies between one frame and half of the usable hardware buffer. */
257 size_t max_use, max_use_2;
260 pa_assert(u->use_tsched);
/* Usable buffer = whole hw buffer minus the part we keep unused to honor
 * the requested latency. */
262 max_use = u->hwbuf_size - u->hwbuf_unused;
263 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
265 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
266 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
268 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
269 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
272 static void fix_tsched_watermark(struct userdata *u) {
/* Keep the timer-scheduling watermark inside its valid window:
 * at most (usable buffer - min_sleep), at least min_wakeup. */
275 pa_assert(u->use_tsched);
277 max_use = u->hwbuf_size - u->hwbuf_unused;
279 if (u->tsched_watermark > max_use - u->min_sleep)
280 u->tsched_watermark = max_use - u->min_sleep;
282 if (u->tsched_watermark < u->min_wakeup)
283 u->tsched_watermark = u->min_wakeup;
286 static void increase_watermark(struct userdata *u) {
/* React to an overrun (or near-overrun): first try to raise the wakeup
 * watermark; if it is already at its ceiling, raise the source's minimum
 * latency instead. If both are maxed out there is nothing left to do. */
287 size_t old_watermark;
288 pa_usec_t old_min_latency, new_min_latency;
291 pa_assert(u->use_tsched);
293 /* First, just try to increase the watermark */
294 old_watermark = u->tsched_watermark;
/* Grow geometrically but never by more than the configured step. */
295 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
296 fix_tsched_watermark(u);
298 if (old_watermark != u->tsched_watermark) {
299 pa_log_info("Increasing wakeup watermark to %0.2f ms",
300 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
304 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
305 old_min_latency = u->source->thread_info.min_latency;
306 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
307 new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
309 if (old_min_latency != new_min_latency) {
310 pa_log_info("Increasing minimal latency to %0.2f ms",
311 (double) new_min_latency / PA_USEC_PER_MSEC);
313 pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
316 /* When we reach this we're officialy fucked! */
319 static void decrease_watermark(struct userdata *u) {
/* Lower the wakeup watermark again after a quiet period, but only once
 * the hold-off time (watermark_dec_not_before) has expired. The latency
 * range is deliberately left untouched. */
320 size_t old_watermark;
324 pa_assert(u->use_tsched);
326 now = pa_rtclock_now();
/* Hold-off not yet armed: arm it below and bail out for now. */
328 if (u->watermark_dec_not_before <= 0)
/* Hold-off still running: too early to decrease. */
331 if (u->watermark_dec_not_before > now)
334 old_watermark = u->tsched_watermark;
/* Shrink by the configured step, but never below half the current value. */
336 if (u->tsched_watermark < u->watermark_dec_step)
337 u->tsched_watermark = u->tsched_watermark / 2;
339 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
341 fix_tsched_watermark(u);
343 if (old_watermark != u->tsched_watermark)
344 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
345 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
347 /* We don't change the latency range*/
/* Re-arm the hold-off so we do not decrease again too soon. */
350 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
353 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
/* Compute, for timer scheduling, how long we may sleep (*sleep_usec)
 * and how much time to reserve for processing (*process_usec), based on
 * the requested latency and the current watermark. */
356 pa_assert(sleep_usec);
357 pa_assert(process_usec);
360 pa_assert(u->use_tsched);
362 usec = pa_source_get_requested_latency_within_thread(u->source);
/* No specific latency requested: budget the whole hardware buffer. */
364 if (usec == (pa_usec_t) -1)
365 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
367 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
/* Sleep for the latency budget minus the watermark margin. */
372 *sleep_usec = usec - wm;
376 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
377 (unsigned long) (usec / PA_USEC_PER_MSEC),
378 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
379 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
383 static int try_recover(struct userdata *u, const char *call, int err) {
/* Attempt to recover the PCM from an error returned by `call` (overrun,
 * suspend, ...) via snd_pcm_recover(). Logs the condition; returns
 * negative if recovery itself fails. */
388 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
/* EAGAIN is not an error condition here and must be handled by callers. */
390 pa_assert(err != -EAGAIN);
393 pa_log_debug("%s: Buffer overrun!", call);
395 if (err == -ESTRPIPE)
396 pa_log_debug("%s: System suspended!", call);
/* Final argument 1 = silent: snd_pcm_recover() won't print to stderr. */
398 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
399 pa_log("%s: %s", call, pa_alsa_strerror(err));
407 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
/* Given `n_bytes` currently available to read, return how much space is
 * left before the capture buffer overruns, detect overruns, and adapt
 * the watermark: grow it on (near-)overrun, shrink it when we have
 * plenty of headroom and were woken by the timer (`on_timeout`). */
408 size_t left_to_record;
409 size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
410 pa_bool_t overrun = FALSE;
412 /* We use <= instead of < for this check here because an overrun
413 * only happens after the last sample was processed, not already when
414 * it is removed from the buffer. This is particularly important
415 * when block transfer is used. */
417 if (n_bytes <= rec_space)
418 left_to_record = rec_space - n_bytes;
421 /* We got a dropout. What a mess! */
429 if (pa_log_ratelimit(PA_LOG_INFO))
430 pa_log_info("Overrun!");
434 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
438 pa_bool_t reset_not_before = TRUE;
440 if (overrun || left_to_record < u->watermark_inc_threshold)
441 increase_watermark(u);
442 else if (left_to_record > u->watermark_dec_threshold) {
443 reset_not_before = FALSE;
445 /* We decrease the watermark only if have actually
446 * been woken up by a timeout. If something else woke
447 * us up it's too easy to fulfill the deadlines... */
450 decrease_watermark(u);
/* Anything between the two thresholds resets the decrease hold-off. */
453 if (reset_not_before)
454 u->watermark_dec_not_before = 0;
457 return left_to_record;
460 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
/* Read captured audio via the ALSA mmap interface and post it to the
 * source zero-copy. Loops until the available data is drained (or an
 * iteration cap is hit), then reports via *sleep_usec how long the
 * caller may sleep. Returns 1 if any work was done, 0 otherwise,
 * negative on unrecoverable error.
 * NOTE(review): several control-flow lines (loop header, `polled`
 * checks, error returns) are elided in this excerpt. */
461 pa_bool_t work_done = FALSE;
462 pa_usec_t max_sleep_usec = 0, process_usec = 0;
463 size_t left_to_record;
467 pa_source_assert_ref(u->source);
470 hw_sleep_time(u, &max_sleep_usec, &process_usec);
476 pa_bool_t after_avail = TRUE;
478 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
480 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
486 n_bytes = (size_t) n * u->frame_size;
489 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
492 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
/* Skip the read when plenty of room remains and the next deadline is
 * comfortably far away. */
497 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
499 pa_log_debug("Not reading, because too early.");
504 if (PA_UNLIKELY(n_bytes <= 0)) {
/* POLLIN fired but nothing is available: almost certainly a driver bug. */
508 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
509 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
510 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
511 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
517 pa_log_debug("Not reading, because not necessary.");
525 pa_log_debug("Not filling up, because already too many iterations.");
534 pa_log_debug("Reading");
541 const snd_pcm_channel_area_t *areas;
542 snd_pcm_uframes_t offset, frames;
543 snd_pcm_sframes_t sframes;
545 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
546 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
548 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
/* EAGAIN right after snd_pcm_avail() said data was there is tolerated
 * only on later loop iterations. */
550 if (!after_avail && err == -EAGAIN)
553 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
559 /* Make sure that if these memblocks need to be copied they will fit into one slot */
560 if (frames > pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size)
561 frames = pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size;
563 if (!after_avail && frames == 0)
566 pa_assert(frames > 0);
569 /* Check these are multiples of 8 bit */
570 pa_assert((areas[0].first & 7) == 0);
571 pa_assert((areas[0].step & 7)== 0);
573 /* We assume a single interleaved memory buffer */
574 pa_assert((areas[0].first >> 3) == 0);
575 pa_assert((areas[0].step >> 3) == u->frame_size);
577 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
/* Wrap the DMA area in a fixed (non-owned) memblock: no copy is made. */
579 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
580 chunk.length = pa_memblock_get_length(chunk.memblock);
583 pa_source_post(u->source, &chunk);
584 pa_memblock_unref_fixed(chunk.memblock);
586 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
588 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
/* Track total bytes read for latency/smoother bookkeeping. */
596 u->read_count += frames * u->frame_size;
599 pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
602 if ((size_t) frames * u->frame_size >= n_bytes)
605 n_bytes -= (size_t) frames * u->frame_size;
/* Sleep until just before the buffer would fill, minus processing time. */
610 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
612 if (*sleep_usec > process_usec)
613 *sleep_usec -= process_usec;
618 return work_done ? 1 : 0;
621 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
/* Fallback read path using snd_pcm_readi() into a freshly allocated
 * memblock (one copy, unlike mmap_read()). Same contract as
 * mmap_read(): drains available data, sets *sleep_usec, returns 1 if
 * work was done, 0 otherwise, negative on unrecoverable error.
 * NOTE(review): loop header and some error-return lines are elided in
 * this excerpt. */
622 int work_done = FALSE;
623 pa_usec_t max_sleep_usec = 0, process_usec = 0;
624 size_t left_to_record;
628 pa_source_assert_ref(u->source);
631 hw_sleep_time(u, &max_sleep_usec, &process_usec);
637 pa_bool_t after_avail = TRUE;
639 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
641 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
647 n_bytes = (size_t) n * u->frame_size;
648 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
/* Skip the read when the next deadline is comfortably far away. */
653 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
656 if (PA_UNLIKELY(n_bytes <= 0)) {
/* POLLIN fired but nothing is available: almost certainly a driver bug. */
660 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
661 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
662 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
663 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
673 pa_log_debug("Not filling up, because already too many iterations.");
683 snd_pcm_sframes_t frames;
/* (size_t) -1 asks the pool for its default/maximum block size. */
686 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
688 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
690 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
691 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
693 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
695 p = pa_memblock_acquire(chunk.memblock);
696 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
697 pa_memblock_release(chunk.memblock);
699 if (PA_UNLIKELY(frames < 0)) {
700 pa_memblock_unref(chunk.memblock);
/* EAGAIN is tolerated only on later loop iterations. */
702 if (!after_avail && (int) frames == -EAGAIN)
705 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
711 if (!after_avail && frames == 0) {
712 pa_memblock_unref(chunk.memblock);
716 pa_assert(frames > 0);
720 chunk.length = (size_t) frames * u->frame_size;
722 pa_source_post(u->source, &chunk);
723 pa_memblock_unref(chunk.memblock);
/* Track total bytes read for latency/smoother bookkeeping. */
727 u->read_count += frames * u->frame_size;
729 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
731 if ((size_t) frames * u->frame_size >= n_bytes)
734 n_bytes -= (size_t) frames * u->frame_size;
/* Sleep until just before the buffer would fill, minus processing time. */
739 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
741 if (*sleep_usec > process_usec)
742 *sleep_usec -= process_usec;
747 return work_done ? 1 : 0;
750 static void update_smoother(struct userdata *u) {
/* Feed the time smoother a fresh (system time, stream position) sample
 * pair so latency queries can translate between the sound-card and
 * system clock domains. Rate-limited by u->smoother_interval, which
 * grows exponentially up to SMOOTHER_MAX_INTERVAL. */
751 snd_pcm_sframes_t delay = 0;
754 pa_usec_t now1 = 0, now2;
755 snd_pcm_status_t *status;
757 snd_pcm_status_alloca(&status);
760 pa_assert(u->pcm_handle);
762 /* Let's update the time smoother */
764 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
765 pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
/* Prefer the driver's own timestamp for the "now" reference... */
769 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
770 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
772 snd_htimestamp_t htstamp = { 0, 0 };
773 snd_pcm_status_get_htstamp(status, &htstamp);
774 now1 = pa_timespec_load(&htstamp);
777 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
779 now1 = pa_rtclock_now();
781 /* check if the time since the last update is bigger than the interval */
782 if (u->last_smoother_update > 0)
783 if (u->last_smoother_update + u->smoother_interval > now1)
/* Stream position in bytes = bytes already read + what the hardware has
 * buffered (capture delay), converted to time below. */
786 position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
787 now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
789 pa_smoother_put(u->smoother, now1, now2);
791 u->last_smoother_update = now1;
792 /* exponentially increase the update interval up to the MAX limit */
793 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
796 static pa_usec_t source_get_latency(struct userdata *u) {
/* Current capture latency: smoothed stream position minus the bytes we
 * have already delivered, clamped to zero. */
798 pa_usec_t now1, now2;
802 now1 = pa_rtclock_now();
803 now2 = pa_smoother_get(u->smoother, now1);
805 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
807 return delay >= 0 ? (pa_usec_t) delay : 0;
810 static int build_pollfd(struct userdata *u) {
/* (Re)create the rtpoll item that polls the PCM's file descriptors,
 * freeing any previous one first. */
812 pa_assert(u->pcm_handle);
814 if (u->alsa_rtpoll_item)
815 pa_rtpoll_item_free(u->alsa_rtpoll_item);
817 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
823 /* Called from IO context */
824 static int suspend(struct userdata *u) {
/* Suspend capture: pause the smoother, close the PCM handle and drop
 * the poll item. unsuspend() reverses this. */
826 pa_assert(u->pcm_handle);
828 pa_smoother_pause(u->smoother, pa_rtclock_now());
831 snd_pcm_close(u->pcm_handle);
832 u->pcm_handle = NULL;
834 if (u->alsa_rtpoll_item) {
835 pa_rtpoll_item_free(u->alsa_rtpoll_item);
836 u->alsa_rtpoll_item = NULL;
839 pa_log_info("Device suspended...");
844 /* Called from IO context */
845 static int update_sw_params(struct userdata *u) {
/* Recompute hwbuf_unused and avail_min from the currently requested
 * latency and push them to ALSA as software parameters. */
846 snd_pcm_uframes_t avail_min;
851 /* Use the full buffer if noone asked us for anything specific */
857 if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
860 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
862 b = pa_usec_to_bytes(latency, &u->source->sample_spec);
864 /* We need at least one sample in our buffer */
866 if (PA_UNLIKELY(b < u->frame_size))
/* Everything beyond the requested latency stays unused. */
869 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
/* hwbuf_unused changed, so the tsched margins must be re-clamped. */
872 fix_min_sleep_wakeup(u);
873 fix_tsched_watermark(u);
876 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
881 pa_usec_t sleep_usec, process_usec;
/* Under timer scheduling, wake up only once the planned sleep's worth
 * of frames is available. */
883 hw_sleep_time(u, &sleep_usec, &process_usec);
884 avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
887 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
889 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
890 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
897 /* Called from IO context */
898 static int unsuspend(struct userdata *u) {
/* Resume after suspend(): reopen the PCM and re-apply the original
 * hw/sw parameters. Fails (closing the PCM again) if the device can no
 * longer provide the exact same access mode, sample spec or buffer
 * geometry we had before suspending. */
902 snd_pcm_uframes_t period_size, buffer_size;
905 pa_assert(!u->pcm_handle);
907 pa_log_info("Trying resume...");
/* Disable ALSA's transparent plugins so we get the raw device back. */
909 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
911 SND_PCM_NO_AUTO_RESAMPLE|
912 SND_PCM_NO_AUTO_CHANNELS|
913 SND_PCM_NO_AUTO_FORMAT)) < 0) {
914 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
918 ss = u->source->sample_spec;
919 period_size = u->fragment_size / u->frame_size;
920 buffer_size = u->hwbuf_size / u->frame_size;
924 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
925 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
929 if (b != u->use_mmap || d != u->use_tsched) {
930 pa_log_warn("Resume failed, couldn't get original access mode.");
934 if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
935 pa_log_warn("Resume failed, couldn't restore original sample settings.");
939 if (period_size*u->frame_size != u->fragment_size ||
940 buffer_size*u->frame_size != u->hwbuf_size) {
941 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
942 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
943 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
947 if (update_sw_params(u) < 0)
950 if (build_pollfd(u) < 0)
953 /* FIXME: We need to reload the volume somehow */
/* TRUE = paused: the smoother restarts when capture actually resumes. */
956 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
957 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
958 u->last_smoother_update = 0;
962 pa_log_info("Resumed successfully...");
/* Error path: close the half-opened handle again. */
968 snd_pcm_close(u->pcm_handle);
969 u->pcm_handle = NULL;
975 /* Called from IO context */
976 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
/* IO-thread message handler: answers latency queries and performs
 * suspend/resume on state changes, then defers to the generic
 * pa_source_process_msg(). */
977 struct userdata *u = PA_SOURCE(o)->userdata;
981 case PA_SOURCE_MESSAGE_GET_LATENCY: {
985 r = source_get_latency(u);
987 *((pa_usec_t*) data) = r;
992 case PA_SOURCE_MESSAGE_SET_STATE:
994 switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
996 case PA_SOURCE_SUSPENDED: {
999 pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1001 if ((r = suspend(u)) < 0)
1007 case PA_SOURCE_IDLE:
1008 case PA_SOURCE_RUNNING: {
/* First transition out of INIT: set up polling; out of SUSPENDED:
 * reopen the device. */
1011 if (u->source->thread_info.state == PA_SOURCE_INIT) {
1012 if (build_pollfd(u) < 0)
1016 if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1017 if ((r = unsuspend(u)) < 0)
1024 case PA_SOURCE_UNLINKED:
1025 case PA_SOURCE_INIT:
1026 case PA_SOURCE_INVALID_STATE:
1033 return pa_source_process_msg(o, code, data, offset, chunk);
1036 /* Called from main context */
1037 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
/* Main-thread state hook: release the device reservation when going to
 * SUSPENDED, re-acquire it when resuming (fail with BUSY if we can't). */
1038 pa_source_state_t old_state;
1041 pa_source_assert_ref(s);
1042 pa_assert_se(u = s->userdata);
1044 old_state = pa_source_get_state(u->source);
1046 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1048 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1049 if (reserve_init(u, u->device_name) < 0)
1050 return -PA_ERR_BUSY;
1055 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
/* Mixer-element callback (main-thread/ctl variant): on an external
 * volume/mute change, refresh our cached values from hardware. Ignored
 * while the session is suspended or the element is being removed. */
1056 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1059 pa_assert(u->mixer_handle);
1061 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1064 if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1067 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1068 pa_source_get_volume(u->source, TRUE);
1069 pa_source_get_mute(u->source, TRUE);
1075 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
/* Mixer-element callback (IO-thread variant): same triggers as
 * ctl_mixer_callback(), but uses the thread-safe combined
 * volume-and-mute refresh. */
1076 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1079 pa_assert(u->mixer_handle);
1081 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1084 if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1087 if (mask & SND_CTL_EVENT_MASK_VALUE)
1088 pa_source_update_volume_and_mute(u->source);
1093 static void source_get_volume_cb(pa_source *s) {
/* Read the hardware volume through the mixer path, normalize it by the
 * base volume and, if it changed externally, adopt it as real_volume
 * (resetting software volume on dB-capable paths). */
1094 struct userdata *u = s->userdata;
1096 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1099 pa_assert(u->mixer_path);
1100 pa_assert(u->mixer_handle);
1102 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1105 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1106 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume)
1108 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1110 if (u->mixer_path->has_dB) {
1111 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1113 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
/* Unchanged since our last write: nothing to propagate. */
1116 if (pa_cvolume_equal(&u->hardware_volume, &r))
1119 s->real_volume = u->hardware_volume = r;
1121 /* Hmm, so the hardware volume changed, let's reset our software volume */
1122 if (u->mixer_path->has_dB)
1123 pa_source_set_soft_volume(s, NULL);
1126 static void source_set_volume_cb(pa_source *s) {
/* Write the requested volume to hardware (as close as the mixer
 * granularity allows) and, on dB-capable paths, compute the residual as
 * a software volume so hw * sw matches the user's request exactly. */
1127 struct userdata *u = s->userdata;
1129 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1130 pa_bool_t sync_volume = !!(s->flags & PA_SOURCE_SYNC_VOLUME);
1133 pa_assert(u->mixer_path);
1134 pa_assert(u->mixer_handle);
1136 /* Shift up by the base volume */
1137 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1139 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, sync_volume, !sync_volume) < 0)
1142 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1143 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1145 u->hardware_volume = r;
1147 if (u->mixer_path->has_dB) {
1148 pa_cvolume new_soft_volume;
1149 pa_bool_t accurate_enough;
1150 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1152 /* Match exactly what the user requested by software */
1153 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1155 /* If the adjustment to do in software is only minimal we
1156 * can skip it. That saves us CPU at the expense of a bit of
/* "Accurate enough" = residual within VOLUME_ACCURACY of unity gain. */
1159 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1160 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1162 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1163 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1164 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1165 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1166 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1167 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1168 pa_yes_no(accurate_enough));
1169 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1171 if (!accurate_enough)
1172 s->soft_volume = new_soft_volume;
/* No-dB path: hardware granularity is all we have. */
1175 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1177 /* We can't match exactly what the user requested, hence let's
1178 * at least tell the user about it */
1184 static void source_write_volume_cb(pa_source *s) {
/* Deferred (synchronous-volume) writer: push the pending hardware
 * volume from thread_info to the mixer, then verify the value the
 * hardware actually took and log any mismatch beyond VOLUME_ACCURACY. */
1185 struct userdata *u = s->userdata;
1186 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1189 pa_assert(u->mixer_path);
1190 pa_assert(u->mixer_handle);
1191 pa_assert(s->flags & PA_SOURCE_SYNC_VOLUME);
1193 /* Shift up by the base volume */
1194 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1196 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1197 pa_log_error("Writing HW volume failed");
1200 pa_bool_t accurate_enough;
1202 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1203 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
/* Ratio of written vs. requested volume; near-unity means success. */
1205 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1207 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1208 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1210 if (!accurate_enough) {
1212 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1213 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1216 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1217 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1218 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1219 pa_log_debug(" in dB: %s (request) != %s",
1220 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1221 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1226 static void source_get_mute_cb(pa_source *s) {
/* Read the hardware mute switch through the mixer path. */
1227 struct userdata *u = s->userdata;
1231 pa_assert(u->mixer_path);
1232 pa_assert(u->mixer_handle);
1234 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1240 static void source_set_mute_cb(pa_source *s) {
/* Write the source's mute state to the hardware mute switch. */
1241 struct userdata *u = s->userdata;
1244 pa_assert(u->mixer_path);
1245 pa_assert(u->mixer_handle);
1247 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1250 static void mixer_volume_init(struct userdata *u) {
/* Wire up (or clear) the volume/mute callbacks according to what the
 * selected mixer path supports: hardware dB volume, raw hardware
 * volume, or pure software fallback; likewise for mute. Also derives
 * base_volume and n_volume_steps for the source. */
1253 if (!u->mixer_path->has_volume) {
1254 pa_source_set_write_volume_callback(u->source, NULL);
1255 pa_source_set_get_volume_callback(u->source, NULL);
1256 pa_source_set_set_volume_callback(u->source, NULL);
1258 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1260 pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1261 pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
/* Synchronous (deferred) volume needs a dB-capable path. */
1263 if (u->mixer_path->has_dB && u->sync_volume) {
1264 pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1265 pa_log_info("Successfully enabled synchronous volume.");
1267 pa_source_set_write_volume_callback(u->source, NULL);
1269 if (u->mixer_path->has_dB) {
1270 pa_source_enable_decibel_volume(u->source, TRUE);
1271 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
/* base_volume maps 0dB to PA_VOLUME_NORM relative to the path's max. */
1273 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1274 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1276 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1278 pa_source_enable_decibel_volume(u->source, FALSE);
1279 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1281 u->source->base_volume = PA_VOLUME_NORM;
1282 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1285 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1288 if (!u->mixer_path->has_mute) {
1289 pa_source_set_get_mute_callback(u->source, NULL);
1290 pa_source_set_set_mute_callback(u->source, NULL);
1291 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1293 pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1294 pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1295 pa_log_info("Using hardware mute control.");
1299 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
/* Switch the active device port: select its mixer path, re-derive the
 * volume/mute callbacks, and apply the port's mixer setting (if any). */
1300 struct userdata *u = s->userdata;
1301 pa_alsa_port_data *data;
1305 pa_assert(u->mixer_handle);
1307 data = PA_DEVICE_PORT_DATA(p);
1309 pa_assert_se(u->mixer_path = data->path);
1310 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1312 mixer_volume_init(u);
1315 pa_alsa_setting_select(data->setting, u->mixer_handle);
1325 static void source_update_requested_latency_cb(pa_source *s) {
/* Requested latency changed: re-derive hwbuf_unused/avail_min. */
1326 struct userdata *u = s->userdata;
1328 pa_assert(u->use_tsched); /* only when timer scheduling is used
1329 * we can dynamically adjust the
1335 update_sw_params(u);
/* The realtime IO thread: runs the rtpoll loop that reads captured audio
 * from the ALSA device and posts it to the source, handles timer-based
 * wakeups, deferred (synchronous) volume changes and poll-error recovery.
 *
 * NOTE(review): excerpt is elided — loop braces, error-path `goto fail`
 * lines and several declarations (work_done, cusec, ret, err, n) are not
 * visible here; structure below follows the visible lines only. */
1338 static void thread_func(void *userdata) {
1339 struct userdata *u = userdata;
1340 unsigned short revents = 0;
1344 pa_log_debug("Thread starting up");
/* Elevate to RT priority if the daemon was configured for it. */
1346 if (u->core->realtime_scheduling)
1347 pa_make_realtime(u->core->realtime_priority);
/* Make this thread's message queue the one pa_thread_mq_* APIs use. */
1349 pa_thread_mq_install(&u->thread_mq);
1353 pa_usec_t rtpoll_sleep = 0;
1356 pa_log_debug("Loop");
1359 /* Read some data and pass it to the sources */
1360 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1362 pa_usec_t sleep_usec = 0;
1363 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
/* First iteration after opening: kick off capture and reset the smoother. */
1366 pa_log_info("Starting capture.");
1367 snd_pcm_start(u->pcm_handle);
1369 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
/* mmap vs. read/write transfer, chosen at device-open time. */
1375 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1377 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1382 /* pa_log_debug("work_done = %i", work_done); */
1387 if (u->use_tsched) {
1390 /* OK, the capture buffer is now empty, let's
1391 * calculate when to wake up next */
1393 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1395 /* Convert from the sound card time domain to the
1396 * system time domain */
1397 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1399 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1401 /* We don't trust the conversion, so we wake up whatever comes first */
1402 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
/* With synchronous (deferred) HW volume, pending changes may impose an
 * earlier deadline than the capture wakeup. */
1406 if (u->source->flags & PA_SOURCE_SYNC_VOLUME) {
1407 pa_usec_t volume_sleep;
1408 pa_source_volume_change_apply(u->source, &volume_sleep);
1409 if (volume_sleep > 0)
1410 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1413 if (rtpoll_sleep > 0)
1414 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1416 pa_rtpoll_set_timer_disabled(u->rtpoll);
1418 /* Hmm, nothing to do. Let's sleep */
1419 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
/* After waking, flush any HW volume changes whose deadline has passed. */
1422 if (u->source->flags & PA_SOURCE_SYNC_VOLUME)
1423 pa_source_volume_change_apply(u->source, NULL);
1428 /* Tell ALSA about this and process its response */
1429 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1430 struct pollfd *pollfd;
1434 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
/* Translate raw pollfd revents into PCM-level events. */
1436 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1437 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
/* Anything besides POLLIN (e.g. POLLERR on xrun/suspend) needs recovery. */
1441 if (revents & ~POLLIN) {
1442 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1446 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1447 pa_log_debug("Wakeup from ALSA!");
1454 /* If this was no regular exit from the loop we have to continue
1455 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1456 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1457 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1460 pa_log_debug("Thread shutting down");
/* Pick the registered name for the new source, in priority order:
 *  1. explicit "source_name=" module argument (registration must not fail
 *     silently, hence namereg_fail = TRUE),
 *  2. derived "alsa_input.<device>[.<mapping>]" name otherwise, where a
 *     name collision may be resolved by the registry (namereg_fail = FALSE).
 *
 * NOTE(review): excerpt is elided — the early `return`s, the
 * description handling and `pa_xfree(t)` are not visible here. */
1463 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1469 pa_assert(device_name);
1471 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1472 pa_source_new_data_set_name(data, n);
1473 data->namereg_fail = TRUE;
/* A module-level "name=" also makes name registration strict. */
1477 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1478 data->namereg_fail = TRUE;
/* Fall back to a name derived from the device id or device string. */
1480 n = device_id ? device_id : device_name;
1481 data->namereg_fail = FALSE;
1485 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1487 t = pa_sprintf_malloc("alsa_input.%s", n);
1489 pa_source_new_data_set_name(data, t);
/* Locate and probe the ALSA mixer for this PCM device.
 * With an explicit "control=" element, synthesize and probe a single
 * capture path for it; with a mapping, probe the whole path set.
 * On any failure, tear down whatever was created (fail path at the
 * bottom) — the source then falls back to software volume/mute.
 *
 * NOTE(review): excerpt is elided — the `return`s, `goto fail` jumps and
 * the `fail:` label itself are not visible; the cleanup lines below are
 * the failure path of this function. */
1493 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
/* Nothing to do when neither a mapping nor a control element was given. */
1495 if (!mapping && !element)
1498 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1499 pa_log_info("Failed to find a working mixer device.");
/* Explicit "control=" element: build a single-element capture path. */
1505 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1508 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1511 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1512 pa_alsa_path_dump(u->mixer_path);
/* Mapping given: probe the full set of candidate input paths. */
1515 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1518 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
/* Failure cleanup: free path set or single path, then close the mixer. */
1525 if (u->mixer_path_set) {
1526 pa_alsa_path_set_free(u->mixer_path_set);
1527 u->mixer_path_set = NULL;
1528 } else if (u->mixer_path) {
1529 pa_alsa_path_free(u->mixer_path);
1530 u->mixer_path = NULL;
1533 if (u->mixer_handle) {
1534 snd_mixer_close(u->mixer_handle);
1535 u->mixer_handle = NULL;
/* Activate the appropriate mixer path for the source and wire up mixer
 * event monitoring: select the active port's path (or the only/first
 * probed path), initialize volume handling, and register ALSA mixer
 * callbacks — on the IO thread's rtpoll for synchronous volume, or on
 * the main loop otherwise.
 *
 * Returns 0 on success, negative on failure (per the visible error
 * branches). NOTE(review): excerpt is elided — `return` statements,
 * else-branch braces and the trailing `return 0;` are not visible. */
1539 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1540 pa_bool_t need_mixer_callback = FALSE;
/* No mixer found during probing — nothing to set up. */
1544 if (!u->mixer_handle)
1547 if (u->source->active_port) {
1548 pa_alsa_port_data *data;
1550 /* We have a list of supported paths, so let's activate the
1551 * one that has been chosen as active */
1553 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1554 u->mixer_path = data->path;
1556 pa_alsa_path_select(data->path, u->mixer_handle);
1559 pa_alsa_setting_select(data->setting, u->mixer_handle);
/* No ports: fall back to the first path of the probed set, if any. */
1563 if (!u->mixer_path && u->mixer_path_set)
1564 u->mixer_path = u->mixer_path_set->paths;
1566 if (u->mixer_path) {
1567 /* Hmm, we have only a single path, then let's activate it */
1569 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1571 if (u->mixer_path->settings)
1572 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
/* Set up HW volume/mute callbacks and dB handling for the active path. */
1577 mixer_volume_init(u);
1579 /* Will we need to register callbacks? */
1580 if (u->mixer_path_set && u->mixer_path_set->paths) {
1583 PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
1584 if (p->has_volume || p->has_mute)
1585 need_mixer_callback = TRUE;
1588 else if (u->mixer_path)
1589 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1591 if (need_mixer_callback) {
1592 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
/* Synchronous volume: mixer events are handled in the IO thread via
 * rtpoll; otherwise an fdlist on the main loop dispatches them. */
1593 if (u->source->flags & PA_SOURCE_SYNC_VOLUME) {
1594 u->mixer_pd = pa_alsa_mixer_pdata_new();
1595 mixer_callback = io_mixer_callback;
1597 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1598 pa_log("Failed to initialize file descriptor monitoring");
1602 u->mixer_fdl = pa_alsa_fdlist_new();
1603 mixer_callback = ctl_mixer_callback;
1605 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1606 pa_log("Failed to initialize file descriptor monitoring");
/* Attach the chosen callback to every element of the active path(s). */
1611 if (u->mixer_path_set)
1612 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1614 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
/* Create and bring up an ALSA capture source.
 *
 * Parses module arguments (sample spec, buffer metrics, mmap/tsched/dB
 * flags), reserves and opens the ALSA device (by mapping, by device id
 * with auto-profile, or by plain device string), probes the mixer,
 * constructs the pa_source, configures latency/watermarks, starts the
 * realtime IO thread and publishes the source.
 *
 * Returns the new pa_source, or NULL on failure (the elided `fail:`
 * path tears down via userdata_free). NOTE(review): this excerpt is
 * elided — goto-fail jumps, several declarations (map, frame_size, t,
 * err, ...) and most closing braces are not visible here. */
1620 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1622 struct userdata *u = NULL;
1623 const char *dev_id = NULL;
1624 pa_sample_spec ss, requested_ss;
1626 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1627 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1629 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE;
1630 pa_source_new_data data;
1631 pa_alsa_profile_set *profile_set = NULL;
/* Start from the daemon defaults, then let module args override them. */
1636 ss = m->core->default_sample_spec;
1637 map = m->core->default_channel_map;
1638 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1639 pa_log("Failed to parse sample specification and channel map");
1644 frame_size = pa_frame_size(&ss);
/* Default buffer metrics: daemon fragment settings plus the tsched
 * buffer/watermark constants from the top of this file. */
1646 nfrags = m->core->default_n_fragments;
1647 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1649 frag_size = (uint32_t) frame_size;
1650 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1651 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1653 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1654 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1655 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1656 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1657 pa_log("Failed to parse buffer metrics");
1661 buffer_size = nfrags * frag_size;
/* Convert byte metrics into ALSA frame counts. */
1663 period_frames = frag_size/frame_size;
1664 buffer_frames = buffer_size/frame_size;
1665 tsched_frames = tsched_size/frame_size;
1667 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1668 pa_log("Failed to parse mmap argument.");
1672 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1673 pa_log("Failed to parse tsched argument.");
1677 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1678 pa_log("Failed to parse ignore_dB argument.");
1682 sync_volume = m->core->sync_volume;
1683 if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
1684 pa_log("Failed to parse sync_volume argument.");
/* tsched may be vetoed globally (e.g. no high-resolution timers). */
1688 use_tsched = pa_alsa_may_tsched(use_tsched);
1690 u = pa_xnew0(struct userdata, 1);
1693 u->use_mmap = use_mmap;
1694 u->use_tsched = use_tsched;
1695 u->sync_volume = sync_volume;
1697 u->rtpoll = pa_rtpoll_new();
1698 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
/* Smoother translates between sound-card and system time domains. */
1700 u->smoother = pa_smoother_new(
1701 SMOOTHER_ADJUST_USEC,
1702 SMOOTHER_WINDOW_USEC,
1708 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
/* Device reservation (e.g. against other audio daemons) keyed on the
 * device id or string. */
1710 dev_id = pa_modargs_get_value(
1712 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1714 if (reserve_init(u, dev_id) < 0)
1717 if (reserve_monitor_init(u, dev_id) < 0)
/* Open the PCM: three mutually exclusive paths — explicit mapping,
 * device id with automatic profile selection, or raw device string. */
1725 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1726 pa_log("device_id= not set");
1730 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1734 SND_PCM_STREAM_CAPTURE,
1735 &period_frames, &buffer_frames, tsched_frames,
1739 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1741 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1744 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1748 SND_PCM_STREAM_CAPTURE,
1749 &period_frames, &buffer_frames, tsched_frames,
1750 &b, &d, profile_set, &mapping)))
1755 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1756 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1759 SND_PCM_STREAM_CAPTURE,
1760 &period_frames, &buffer_frames, tsched_frames,
1765 pa_assert(u->device_name);
1766 pa_log_info("Successfully opened device %s.", u->device_name);
/* Softmodems expose a PCM we must not treat as an audio source. */
1768 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1769 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1774 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
/* b/d report what the open actually negotiated: mmap and tsched
 * capability respectively; downgrade our flags to match. */
1776 if (use_mmap && !b) {
1777 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1778 u->use_mmap = use_mmap = FALSE;
1781 if (use_tsched && (!b || !d)) {
1782 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1783 u->use_tsched = use_tsched = FALSE;
1787 pa_log_info("Successfully enabled mmap() mode.");
1790 pa_log_info("Successfully enabled timer-based scheduling mode.");
1792 /* ALSA might tweak the sample spec, so recalculate the frame size */
1793 frame_size = pa_frame_size(&ss);
1795 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
/* Build the pa_source_new_data: name, properties, sample spec, ports. */
1797 pa_source_new_data_init(&data);
1798 data.driver = driver;
1801 set_source_name(&data, ma, dev_id, u->device_name, mapping);
1803 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1804 * variable instead of using &data.namereg_fail directly, because
1805 * data.namereg_fail is a bitfield and taking the address of a bitfield
1806 * variable is impossible. */
1807 namereg_fail = data.namereg_fail;
1808 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1809 pa_log("Failed to parse boolean argument namereg_fail.");
1810 pa_source_new_data_done(&data);
1813 data.namereg_fail = namereg_fail;
1815 pa_source_new_data_set_sample_spec(&data, &ss);
1816 pa_source_new_data_set_channel_map(&data, &map);
1818 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1819 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1820 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1821 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1822 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1825 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1826 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1829 pa_alsa_init_description(data.proplist);
1831 if (u->control_device)
1832 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1834 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1835 pa_log("Invalid properties");
1836 pa_source_new_data_done(&data);
/* Expose one port per probed mixer path. */
1840 if (u->mixer_path_set)
1841 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1843 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1844 pa_source_new_data_done(&data);
1847 pa_log("Failed to create source object");
/* Deferred-volume tuning knobs live in thread_info; safe to set before
 * the IO thread starts. */
1851 if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
1852 &u->source->thread_info.volume_change_safety_margin) < 0) {
1853 pa_log("Failed to parse sync_volume_safety_margin parameter");
1857 if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
1858 &u->source->thread_info.volume_change_extra_delay) < 0) {
1859 pa_log("Failed to parse sync_volume_extra_delay parameter");
1863 u->source->parent.process_msg = source_process_msg;
1865 u->source->update_requested_latency = source_update_requested_latency_cb;
1866 u->source->set_state = source_set_state_cb;
1867 u->source->set_port = source_set_port_cb;
1868 u->source->userdata = u;
1870 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1871 pa_source_set_rtpoll(u->source, u->rtpoll);
/* Record the metrics ALSA actually granted (may differ from requested). */
1873 u->frame_size = frame_size;
1874 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1875 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1876 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1878 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1879 (double) u->hwbuf_size / (double) u->fragment_size,
1880 (long unsigned) u->fragment_size,
1881 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1882 (long unsigned) u->hwbuf_size,
1883 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1885 if (u->use_tsched) {
/* Watermark and its adjustment steps, converted to bytes of the final
 * (possibly ALSA-tweaked) sample spec. */
1886 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
1888 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
1889 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
1891 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
1892 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
1894 fix_min_sleep_wakeup(u);
1895 fix_tsched_watermark(u);
/* tsched: dynamic latency up to the hw buffer; otherwise fixed latency. */
1897 pa_source_set_latency_range(u->source,
1899 pa_bytes_to_usec(u->hwbuf_size, &ss));
1901 pa_log_info("Time scheduling watermark is %0.2fms",
1902 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1904 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1908 if (update_sw_params(u) < 0)
1911 if (setup_mixer(u, ignore_dB) < 0)
1914 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1916 if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
1917 pa_log("Failed to create thread.");
1921 /* Get initial mixer settings */
1922 if (data.volume_is_set) {
1923 if (u->source->set_volume)
1924 u->source->set_volume(u->source);
1926 if (u->source->get_volume)
1927 u->source->get_volume(u->source);
1930 if (data.muted_is_set) {
1931 if (u->source->set_mute)
1932 u->source->set_mute(u->source);
1934 if (u->source->get_mute)
1935 u->source->get_mute(u->source);
/* Make the source visible to clients; it is fully initialized now. */
1938 pa_source_put(u->source);
1941 pa_alsa_profile_set_free(profile_set);
/* Failure path: free the profile set (userdata teardown is elided). */
1951 pa_alsa_profile_set_free(profile_set);
/* Tear down a (possibly half-constructed) userdata: stop the IO thread,
 * release the source, rtpoll, PCM handle, mixer objects and strings.
 * Every member is NULL-checked (elided here for some), so this is safe
 * to call from the constructor's failure path.
 *
 * NOTE(review): excerpt is elided — the guarding `if (u->...)` lines for
 * several frees and the final `pa_xfree(u)` are not visible here. */
1956 static void userdata_free(struct userdata *u) {
/* Unlink first so no new work is queued at the source. */
1960 pa_source_unlink(u->source);
/* Synchronously shut down and join the IO thread before freeing state
 * it may still touch. */
1963 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1964 pa_thread_free(u->thread);
1967 pa_thread_mq_done(&u->thread_mq);
1970 pa_source_unref(u->source);
1973 pa_alsa_mixer_pdata_free(u->mixer_pd);
1975 if (u->alsa_rtpoll_item)
1976 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1979 pa_rtpoll_free(u->rtpoll);
/* Drop pending captured data, then close the PCM. */
1981 if (u->pcm_handle) {
1982 snd_pcm_drop(u->pcm_handle);
1983 snd_pcm_close(u->pcm_handle);
1987 pa_alsa_fdlist_free(u->mixer_fdl);
/* A path set owns its paths; a standalone path is freed individually. */
1989 if (u->mixer_path_set)
1990 pa_alsa_path_set_free(u->mixer_path_set);
1991 else if (u->mixer_path)
1992 pa_alsa_path_free(u->mixer_path);
1994 if (u->mixer_handle)
1995 snd_mixer_close(u->mixer_handle);
1998 pa_smoother_free(u->smoother);
2003 pa_xfree(u->device_name);
2004 pa_xfree(u->control_device);
2008 void pa_alsa_source_free(pa_source *s) {
2011 pa_source_assert_ref(s);
2012 pa_assert_se(u = s->userdata);