2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 #include <asoundlib.h>
31 #include <pulse/rtclock.h>
32 #include <pulse/timeval.h>
33 #include <pulse/volume.h>
34 #include <pulse/xmalloc.h>
36 #include <pulsecore/core.h>
37 #include <pulsecore/i18n.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-rtclock.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/thread.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/time-smoother.h>
52 #include <modules/reserve-wrap.h>
54 #include "alsa-util.h"
55 #include "alsa-source.h"
57 /* #define DEBUG_TIMING */
59 #define DEFAULT_DEVICE "default"
61 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
62 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
64 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
65 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
66 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
67 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
68 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
69 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
71 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
72 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
74 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
75 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
77 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
78 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
80 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
88 pa_thread_mq thread_mq;
91 snd_pcm_t *pcm_handle;
93 pa_alsa_fdlist *mixer_fdl;
94 pa_alsa_mixer_pdata *mixer_pd;
95 snd_mixer_t *mixer_handle;
96 pa_alsa_path_set *mixer_path_set;
97 pa_alsa_path *mixer_path;
99 pa_cvolume hardware_volume;
111 watermark_inc_threshold,
112 watermark_dec_threshold;
114 pa_usec_t watermark_dec_not_before;
116 char *device_name; /* name of the PCM device */
117 char *control_device; /* name of the control device */
119 pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1;
123 pa_rtpoll_item *alsa_rtpoll_item;
125 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
127 pa_smoother *smoother;
129 pa_usec_t smoother_interval;
130 pa_usec_t last_smoother_update;
132 pa_reserve_wrapper *reserve;
133 pa_hook_slot *reserve_slot;
134 pa_reserve_monitor_wrapper *monitor;
135 pa_hook_slot *monitor_slot;
138 static void userdata_free(struct userdata *u);
/* Device-reservation hook: fired when another application (forcibly) requests
 * the ALSA device. We try to suspend our source so the device is released.
 * NOTE(review): elided listing — trailing lines of this function are missing. */
140 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
/* If we cannot suspend, cancel the hook chain: we keep the device. */
144 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
145 return PA_HOOK_CANCEL;
/* Tear down the device-reservation state: free the hook slot and drop our
 * reference on the reserve wrapper. */
150 static void reserve_done(struct userdata *u) {
153 if (u->reserve_slot) {
154 pa_hook_slot_free(u->reserve_slot);
155 u->reserve_slot = NULL;
/* presumably guarded by an `if (u->reserve)` on an elided line — confirm */
159 pa_reserve_wrapper_unref(u->reserve);
/* Push the source's human-readable description into the reservation wrapper
 * so other applications can show who is holding the device. */
164 static void reserve_update(struct userdata *u) {
165 const char *description;
/* Nothing to do before the source exists or without an active reservation. */
168 if (!u->source || !u->reserve)
171 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
172 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
/* Acquire the device-reservation wrapper for device `dname` and connect our
 * reserve_cb hook. Skipped entirely in system mode.
 * NOTE(review): error paths and return statements fall on elided lines. */
175 static int reserve_init(struct userdata *u, const char *dname) {
184 if (pa_in_system_mode())
187 if (!(rname = pa_alsa_get_reserve_name(dname)))
190 /* We are resuming, try to lock the device */
191 u->reserve = pa_reserve_wrapper_get(u->core, rname);
199 pa_assert(!u->reserve_slot);
200 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
/* Busy-monitor hook: suspend the source while some other application holds the
 * device (and we do not hold the reservation ourselves); resume otherwise. */
205 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
211 b = PA_PTR_TO_UINT(busy) && !u->reserve;
213 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
/* Tear down the busy-monitor state, mirroring reserve_done(). */
217 static void monitor_done(struct userdata *u) {
220 if (u->monitor_slot) {
221 pa_hook_slot_free(u->monitor_slot);
222 u->monitor_slot = NULL;
/* presumably guarded by an `if (u->monitor)` on an elided line — confirm */
226 pa_reserve_monitor_wrapper_unref(u->monitor);
/* Acquire the reservation busy-monitor for device `dname` and connect
 * monitor_cb. Skipped in system mode, like reserve_init(). */
231 static int reserve_monitor_init(struct userdata *u, const char *dname) {
237 if (pa_in_system_mode())
240 if (!(rname = pa_alsa_get_reserve_name(dname)))
243 /* We are resuming, try to lock the device */
244 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
250 pa_assert(!u->monitor_slot);
251 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
/* Recompute min_sleep/min_wakeup (in bytes) for timer-based scheduling:
 * each is clamped between one frame and half of the usable hw buffer. */
256 static void fix_min_sleep_wakeup(struct userdata *u) {
257 size_t max_use, max_use_2;
260 pa_assert(u->use_tsched);
/* Usable buffer = hw buffer minus the part we keep unused for latency. */
262 max_use = u->hwbuf_size - u->hwbuf_unused;
263 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
265 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
266 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
268 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
269 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
/* Clamp the timer-scheduling watermark so that
 * min_wakeup <= tsched_watermark <= max_use - min_sleep. */
272 static void fix_tsched_watermark(struct userdata *u) {
275 pa_assert(u->use_tsched);
277 max_use = u->hwbuf_size - u->hwbuf_unused;
279 if (u->tsched_watermark > max_use - u->min_sleep)
280 u->tsched_watermark = max_use - u->min_sleep;
282 if (u->tsched_watermark < u->min_wakeup)
283 u->tsched_watermark = u->min_wakeup;
/* React to an overrun/deadline miss: first try to raise the wakeup watermark
 * (doubling, capped at +watermark_inc_step); if that is already maxed out,
 * raise the source's minimum latency instead. */
286 static void increase_watermark(struct userdata *u) {
287 size_t old_watermark;
288 pa_usec_t old_min_latency, new_min_latency;
291 pa_assert(u->use_tsched);
293 /* First, just try to increase the watermark */
294 old_watermark = u->tsched_watermark;
295 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
296 fix_tsched_watermark(u);
298 if (old_watermark != u->tsched_watermark) {
299 pa_log_info("Increasing wakeup watermark to %0.2f ms",
300 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
304 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
305 old_min_latency = u->source->thread_info.min_latency;
306 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
307 new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
309 if (old_min_latency != new_min_latency) {
310 pa_log_info("Increasing minimal latency to %0.2f ms",
311 (double) new_min_latency / PA_USEC_PER_MSEC);
313 pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
316 /* If we reach this point, neither the watermark nor the latency can be raised any further. */
/* Opportunistically lower the wakeup watermark after a sustained period
 * without overruns (rate-limited via watermark_dec_not_before). */
319 static void decrease_watermark(struct userdata *u) {
320 size_t old_watermark;
324 pa_assert(u->use_tsched);
326 now = pa_rtclock_now();
/* NOTE(review): the two guards below presumably `return` on elided lines —
 * the first skips the very first call, the second enforces the hold-off. */
328 if (u->watermark_dec_not_before <= 0)
331 if (u->watermark_dec_not_before > now)
334 old_watermark = u->tsched_watermark;
/* Halve when smaller than the decrement step; otherwise step down, but never
 * below half (an `else` likely sits on an elided line between these two). */
336 if (u->tsched_watermark < u->watermark_dec_step)
337 u->tsched_watermark = u->tsched_watermark / 2;
339 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
341 fix_tsched_watermark(u);
343 if (old_watermark != u->tsched_watermark)
344 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
345 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
347 /* We don't change the latency range */
/* Wait at least TSCHED_WATERMARK_VERIFY_AFTER_USEC before decreasing again. */
350 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
/* Compute how long we may sleep (and, presumably, how long processing takes)
 * given the requested latency and the current watermark. Both outputs are in
 * the sound-card time domain. */
353 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
356 pa_assert(sleep_usec);
357 pa_assert(process_usec);
360 pa_assert(u->use_tsched);
362 usec = pa_source_get_requested_latency_within_thread(u->source);
/* No specific request: fall back to the full hardware buffer time. */
364 if (usec == (pa_usec_t) -1)
365 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
367 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
/* Sleep until the watermark's worth of audio remains to be captured. */
372 *sleep_usec = usec - wm;
376 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
377 (unsigned long) (usec / PA_USEC_PER_MSEC),
378 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
379 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
/* Attempt to recover the PCM from error `err` (overrun -EPIPE, suspend
 * -ESTRPIPE, ...) via snd_pcm_recover(). `call` names the failing ALSA call
 * for logging. NOTE(review): return statements fall on elided lines. */
383 static int try_recover(struct userdata *u, const char *call, int err) {
388 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
/* -EAGAIN is not an error condition and must be handled by the caller. */
390 pa_assert(err != -EAGAIN);
393 pa_log_debug("%s: Buffer overrun!", call);
395 if (err == -ESTRPIPE)
396 pa_log_debug("%s: System suspended!", call);
398 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
399 pa_log("%s: %s", call, pa_alsa_strerror(err));
/* Given `n_bytes` ready in the hw buffer, compute how much recording space is
 * left, detect overruns, and adapt the watermark: increase on overrun or when
 * below the inc threshold, decrease (only on timer wakeups) when comfortably
 * above the dec threshold. */
407 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
408 size_t left_to_record;
409 size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
410 pa_bool_t overrun = FALSE;
412 /* We use <= instead of < for this check here because an overrun
413 * only happens after the last sample was processed, not already when
414 * it is removed from the buffer. This is particularly important
415 * when block transfer is used. */
417 if (n_bytes <= rec_space)
418 left_to_record = rec_space - n_bytes;
421 /* We got a dropout. What a mess! */
429 if (pa_log_ratelimit(PA_LOG_INFO))
430 pa_log_info("Overrun!");
434 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
438 pa_bool_t reset_not_before = TRUE;
440 if (overrun || left_to_record < u->watermark_inc_threshold)
441 increase_watermark(u);
442 else if (left_to_record > u->watermark_dec_threshold) {
443 reset_not_before = FALSE;
445 /* We decrease the watermark only if have actually
446 * been woken up by a timeout. If something else woke
447 * us up it's too easy to fulfill the deadlines... */
/* presumably guarded by `if (on_timeout)` on an elided line — confirm */
450 decrease_watermark(u);
/* Leaving the "comfortable" zone resets the decrease hold-off timer. */
453 if (reset_not_before)
454 u->watermark_dec_not_before = 0;
457 return left_to_record;
/* Read loop for mmap access mode: drain the capture buffer via
 * snd_pcm_mmap_begin()/commit(), posting zero-copy memchunks to the source.
 * On return *sleep_usec holds the suggested sleep time (sound-card clock).
 * Returns 1 if any work was done, 0 otherwise (error returns are on elided
 * lines). `polled` indicates a POLLIN wakeup; `on_timeout` a timer wakeup. */
460 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
461 pa_bool_t work_done = FALSE;
462 pa_usec_t max_sleep_usec = 0, process_usec = 0;
463 size_t left_to_record;
467 pa_source_assert_ref(u->source);
470 hw_sleep_time(u, &max_sleep_usec, &process_usec);
476 pa_bool_t after_avail = TRUE;
478 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
/* 0 from try_recover() means "recovered, retry"; presumably continues. */
480 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
486 n_bytes = (size_t) n * u->frame_size;
489 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
492 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
/* Too early to read: not enough data accumulated yet relative to the
 * process/sleep budget (condition start is on an elided line). */
497 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
499 pa_log_debug("Not reading, because too early.");
504 if (PA_UNLIKELY(n_bytes <= 0)) {
/* Spurious POLLIN with nothing available: almost certainly a driver bug. */
508 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
509 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
510 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
511 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
517 pa_log_debug("Not reading, because not necessary.");
/* Cap on loop iterations to keep wakeup latency bounded. */
525 pa_log_debug("Not filling up, because already too many iterations.");
534 pa_log_debug("Reading");
541 const snd_pcm_channel_area_t *areas;
542 snd_pcm_uframes_t offset, frames;
543 snd_pcm_sframes_t sframes;
545 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
546 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
548 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
/* -EAGAIN after we already consumed some data just ends this round. */
550 if (!after_avail && err == -EAGAIN)
553 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
559 /* Make sure that if these memblocks need to be copied they will fit into one slot */
560 if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
561 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
563 if (!after_avail && frames == 0)
566 pa_assert(frames > 0);
569 /* Check these are multiples of 8 bit */
570 pa_assert((areas[0].first & 7) == 0);
571 pa_assert((areas[0].step & 7)== 0);
573 /* We assume a single interleaved memory buffer */
574 pa_assert((areas[0].first >> 3) == 0);
575 pa_assert((areas[0].step >> 3) == u->frame_size);
577 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
/* Wrap the mmap'ed region in a fixed (read-only) memblock — zero copy. */
579 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
580 chunk.length = pa_memblock_get_length(chunk.memblock);
583 pa_source_post(u->source, &chunk);
584 pa_memblock_unref_fixed(chunk.memblock);
586 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
588 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
596 u->read_count += frames * u->frame_size;
599 pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
602 if ((size_t) frames * u->frame_size >= n_bytes)
605 n_bytes -= (size_t) frames * u->frame_size;
/* Suggest sleeping until the watermark's worth of audio remains. */
610 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
611 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
613 if (*sleep_usec > process_usec)
614 *sleep_usec -= process_usec;
619 return work_done ? 1 : 0;
/* Read loop for non-mmap access mode: copy data out of the device with
 * snd_pcm_readi() into freshly allocated memblocks and post them to the
 * source. Mirrors mmap_read(); same parameters and return convention. */
622 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
623 int work_done = FALSE;
624 pa_usec_t max_sleep_usec = 0, process_usec = 0;
625 size_t left_to_record;
629 pa_source_assert_ref(u->source);
632 hw_sleep_time(u, &max_sleep_usec, &process_usec);
638 pa_bool_t after_avail = TRUE;
640 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
642 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
648 n_bytes = (size_t) n * u->frame_size;
649 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
/* Too early to read (condition start is on an elided line). */
654 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
657 if (PA_UNLIKELY(n_bytes <= 0)) {
/* Spurious POLLIN with nothing available: almost certainly a driver bug. */
661 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
662 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
663 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
664 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
674 pa_log_debug("Not filling up, because already too many iterations.")
684 snd_pcm_sframes_t frames;
/* (size_t) -1 requests the mempool's default/maximum block size. */
687 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
689 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
691 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
692 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
694 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
696 p = pa_memblock_acquire(chunk.memblock);
697 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
698 pa_memblock_release(chunk.memblock);
700 if (PA_UNLIKELY(frames < 0)) {
701 pa_memblock_unref(chunk.memblock);
/* -EAGAIN after we already consumed some data just ends this round. */
703 if (!after_avail && (int) frames == -EAGAIN)
706 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
712 if (!after_avail && frames == 0) {
713 pa_memblock_unref(chunk.memblock);
717 pa_assert(frames > 0);
721 chunk.length = (size_t) frames * u->frame_size;
723 pa_source_post(u->source, &chunk);
724 pa_memblock_unref(chunk.memblock);
728 u->read_count += frames * u->frame_size;
730 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
732 if ((size_t) frames * u->frame_size >= n_bytes)
735 n_bytes -= (size_t) frames * u->frame_size;
740 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
741 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
743 if (*sleep_usec > process_usec)
744 *sleep_usec -= process_usec;
749 return work_done ? 1 : 0;
/* Feed the time smoother a fresh (system time, stream time) sample pair:
 * system time comes from the PCM status htimestamp (falling back to
 * pa_rtclock_now()), stream time from read_count plus the current delay.
 * Updates are rate-limited by smoother_interval, which grows exponentially. */
752 static void update_smoother(struct userdata *u) {
753 snd_pcm_sframes_t delay = 0;
756 pa_usec_t now1 = 0, now2;
757 snd_pcm_status_t *status;
759 snd_pcm_status_alloca(&status);
762 pa_assert(u->pcm_handle);
764 /* Let's update the time smoother */
766 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
767 pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
771 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
772 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
774 snd_htimestamp_t htstamp = { 0, 0 };
775 snd_pcm_status_get_htstamp(status, &htstamp);
776 now1 = pa_timespec_load(&htstamp);
779 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
781 now1 = pa_rtclock_now();
783 /* check if the time since the last update is bigger than the interval */
784 if (u->last_smoother_update > 0)
785 if (u->last_smoother_update + u->smoother_interval > now1)
/* Stream position in bytes: everything read plus what is still in the buffer. */
788 position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
789 now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
791 pa_smoother_put(u->smoother, now1, now2);
793 u->last_smoother_update = now1;
794 /* exponentially increase the update interval up to the MAX limit */
795 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
/* Current capture latency: smoothed stream time minus the time corresponding
 * to the bytes already delivered; clamped at zero. */
798 static pa_usec_t source_get_latency(struct userdata *u) {
800 pa_usec_t now1, now2;
804 now1 = pa_rtclock_now();
805 now2 = pa_smoother_get(u->smoother, now1);
807 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
809 return delay >= 0 ? (pa_usec_t) delay : 0;
/* (Re)create the rtpoll item that watches the PCM's poll descriptors,
 * replacing any previous one. Error/success returns are on elided lines. */
812 static int build_pollfd(struct userdata *u) {
814 pa_assert(u->pcm_handle);
816 if (u->alsa_rtpoll_item)
817 pa_rtpoll_item_free(u->alsa_rtpoll_item);
819 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
825 /* Called from IO context. Suspend capture: pause the smoother, close the
 * PCM handle and drop the poll item. */
826 static int suspend(struct userdata *u) {
828 pa_assert(u->pcm_handle);
830 pa_smoother_pause(u->smoother, pa_rtclock_now());
832 snd_pcm_close(u->pcm_handle);
834 u->pcm_handle = NULL;
836 if (u->alsa_rtpoll_item) {
837 pa_rtpoll_item_free(u->alsa_rtpoll_item);
838 u->alsa_rtpoll_item = NULL;
841 pa_log_info("Device suspended...");
846 /* Called from IO context. Recompute hwbuf_unused and avail_min from the
 * requested latency (tsched mode also refreshes sleep/watermark limits) and
 * push them to ALSA via pa_alsa_set_sw_params(). */
847 static int update_sw_params(struct userdata *u) {
848 snd_pcm_uframes_t avail_min;
853 /* Use the full buffer if no one asked us for anything specific */
859 if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
862 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
864 b = pa_usec_to_bytes(latency, &u->source->sample_spec);
866 /* We need at least one sample in our buffer */
868 if (PA_UNLIKELY(b < u->frame_size))
/* Keep the part of the hw buffer beyond the requested latency unused. */
871 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
874 fix_min_sleep_wakeup(u);
875 fix_tsched_watermark(u);
878 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
883 pa_usec_t sleep_usec, process_usec;
885 hw_sleep_time(u, &sleep_usec, &process_usec);
886 avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
889 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
891 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
892 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
899 /* Called from IO context. Resume after suspend(): reopen the PCM with the
 * same device/stream flags, restore hw/sw params, verify they match the
 * original configuration exactly, rebuild the poll item and reset the
 * smoother. The failure path closes the handle again (see bottom). */
900 static int unsuspend(struct userdata *u) {
904 snd_pcm_uframes_t period_size, buffer_size;
907 pa_assert(!u->pcm_handle);
909 pa_log_info("Trying resume...");
911 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
913 SND_PCM_NO_AUTO_RESAMPLE|
914 SND_PCM_NO_AUTO_CHANNELS|
915 SND_PCM_NO_AUTO_FORMAT)) < 0) {
916 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
920 ss = u->source->sample_spec;
921 period_size = u->fragment_size / u->frame_size;
922 buffer_size = u->hwbuf_size / u->frame_size;
926 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
927 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
/* The resumed device must offer the same access mode (mmap/tsched)... */
931 if (b != u->use_mmap || d != u->use_tsched) {
932 pa_log_warn("Resume failed, couldn't get original access mode.");
/* ...the same sample spec... */
936 if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
937 pa_log_warn("Resume failed, couldn't restore original sample settings.");
/* ...and the same fragment/buffer geometry as before the suspend. */
941 if (period_size*u->frame_size != u->fragment_size ||
942 buffer_size*u->frame_size != u->hwbuf_size) {
943 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
944 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
945 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
949 if (update_sw_params(u) < 0)
952 if (build_pollfd(u) < 0)
955 /* FIXME: We need to reload the volume somehow */
958 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
959 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
960 u->last_smoother_update = 0;
964 pa_log_info("Resumed successfully...");
/* Failure cleanup (label presumably on an elided line): close and clear. */
970 snd_pcm_close(u->pcm_handle);
971 u->pcm_handle = NULL;
977 /* Called from IO context. Message handler for the source: reports latency
 * and performs suspend/resume on state changes, then delegates everything
 * else to pa_source_process_msg(). */
978 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
979 struct userdata *u = PA_SOURCE(o)->userdata;
983 case PA_SOURCE_MESSAGE_GET_LATENCY: {
987 r = source_get_latency(u);
989 *((pa_usec_t*) data) = r;
994 case PA_SOURCE_MESSAGE_SET_STATE:
996 switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
998 case PA_SOURCE_SUSPENDED: {
1001 pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1003 if ((r = suspend(u)) < 0)
1009 case PA_SOURCE_IDLE:
1010 case PA_SOURCE_RUNNING: {
/* First transition out of INIT: create the poll item. */
1013 if (u->source->thread_info.state == PA_SOURCE_INIT) {
1014 if (build_pollfd(u) < 0)
1018 if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1019 if ((r = unsuspend(u)) < 0)
1026 case PA_SOURCE_UNLINKED:
1027 case PA_SOURCE_INIT:
1028 case PA_SOURCE_INVALID_STATE:
1035 return pa_source_process_msg(o, code, data, offset, chunk);
1038 /* Called from main context. On suspend, release the device reservation
 * (presumably via reserve_done() on an elided line); on resume, re-acquire
 * it, failing with PA_ERR_BUSY if the device cannot be locked. */
1039 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1040 pa_source_state_t old_state;
1043 pa_source_assert_ref(s);
1044 pa_assert_se(u = s->userdata);
1046 old_state = pa_source_get_state(u->source);
1048 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1050 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1051 if (reserve_init(u, u->device_name) < 0)
1052 return -PA_ERR_BUSY;
/* Mixer element callback (main-thread/ctl variant): on an external value
 * change, re-read volume and mute from hardware so PA state stays in sync. */
1057 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1058 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1061 pa_assert(u->mixer_handle);
1063 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1066 if (!PA_SOURCE_IS_LINKED(u->source->state))
/* Ignore events while the session is suspended (e.g. VT switch). */
1069 if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1072 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1073 pa_source_get_volume(u->source, TRUE);
1074 pa_source_get_mute(u->source, TRUE);
/* Mixer element callback (IO-thread variant, used with deferred volume):
 * refresh volume and mute together via the thread-safe update helper. */
1080 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1081 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1084 pa_assert(u->mixer_handle);
1086 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1089 if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1092 if (mask & SND_CTL_EVENT_MASK_VALUE)
1093 pa_source_update_volume_and_mute(u->source);
/* Volume "get" callback: read the hardware volume from the mixer path,
 * normalize by base_volume, and — if it changed behind our back — adopt it
 * as real_volume and reset the software volume (dB paths only). */
1098 static void source_get_volume_cb(pa_source *s) {
1099 struct userdata *u = s->userdata;
1101 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1104 pa_assert(u->mixer_path);
1105 pa_assert(u->mixer_handle);
1107 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1110 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1111 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1113 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1115 if (u->mixer_path->has_dB) {
1116 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1118 pa_log_debug("        in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
/* No change: nothing to propagate (presumably returns on an elided line). */
1121 if (pa_cvolume_equal(&u->hardware_volume, &r))
1124 s->real_volume = u->hardware_volume = r;
1126 /* Hmm, so the hardware volume changed, let's reset our software volume */
1127 if (u->mixer_path->has_dB)
1128 pa_source_set_soft_volume(s, NULL);
/* Volume "set" callback: write the requested volume to hardware (scaled by
 * base_volume), read back what the hardware actually accepted, and — on dB
 * paths — compute a software volume for the residual, skipping it when the
 * difference is within VOLUME_ACCURACY. */
1131 static void source_set_volume_cb(pa_source *s) {
1132 struct userdata *u = s->userdata;
1134 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1135 pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1138 pa_assert(u->mixer_path);
1139 pa_assert(u->mixer_handle);
1141 /* Shift up by the base volume */
1142 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1144 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1147 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1148 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1150 u->hardware_volume = r;
1152 if (u->mixer_path->has_dB) {
1153 pa_cvolume new_soft_volume;
1154 pa_bool_t accurate_enough;
1155 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1157 /* Match exactly what the user requested by software */
1158 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1160 /* If the adjustment to do in software is only minimal we
1161 * can skip it. That saves us CPU at the expense of a bit of
/* accuracy; the assignment's first line is elided — both bounds follow. */
1164 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1165 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1167 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1168 pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1169 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1170 pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1171 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1172 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1173 pa_yes_no(accurate_enough));
1174 pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1176 if (!accurate_enough)
1177 s->soft_volume = new_soft_volume;
1180 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1182 /* We can't match exactly what the user requested, hence let's
1183 * at least tell the user about it */
/* Deferred-volume write callback (IO thread): write the pending hardware
 * volume to the mixer and log a warning if the device did not accept the
 * request within VOLUME_ACCURACY. */
1189 static void source_write_volume_cb(pa_source *s) {
1190 struct userdata *u = s->userdata;
1191 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1194 pa_assert(u->mixer_path);
1195 pa_assert(u->mixer_handle);
1196 pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1198 /* Shift up by the base volume */
1199 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1201 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1202 pa_log_error("Writing HW volume failed");
1205 pa_bool_t accurate_enough;
1207 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1208 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
/* Compare what was written back against what was requested. */
1210 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1212 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1213 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1215 if (!accurate_enough) {
1217 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1218 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1221 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1222 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1223 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1224 pa_log_debug("                                           in dB: %s (request) != %s",
1225 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1226 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
/* Mute "get" callback: read the hardware mute switch from the mixer path.
 * The assignment to s->muted presumably sits on an elided line. */
1231 static void source_get_mute_cb(pa_source *s) {
1232 struct userdata *u = s->userdata;
1236 pa_assert(u->mixer_path);
1237 pa_assert(u->mixer_handle);
1239 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
/* Mute "set" callback: push the source's mute flag to the hardware switch. */
1245 static void source_set_mute_cb(pa_source *s) {
1246 struct userdata *u = s->userdata;
1249 pa_assert(u->mixer_path);
1250 pa_assert(u->mixer_handle);
1252 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
/* Wire up (or clear) the volume and mute callbacks according to what the
 * selected mixer path supports: hardware dB volume with optional deferred
 * writes, raw hardware volume steps, or pure software fallback. Also derives
 * base_volume and n_volume_steps for the source. */
1255 static void mixer_volume_init(struct userdata *u) {
1258 if (!u->mixer_path->has_volume) {
1259 pa_source_set_write_volume_callback(u->source, NULL);
1260 pa_source_set_get_volume_callback(u->source, NULL);
1261 pa_source_set_set_volume_callback(u->source, NULL);
1263 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1265 pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1266 pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1268 if (u->mixer_path->has_dB && u->deferred_volume) {
1269 pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1270 pa_log_info("Successfully enabled synchronous volume.");
1272 pa_source_set_write_volume_callback(u->source, NULL);
1274 if (u->mixer_path->has_dB) {
1275 pa_source_enable_decibel_volume(u->source, TRUE);
1276 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
/* base_volume maps the path's max dB to PA's 0 dB reference point. */
1278 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1279 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1281 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1283 pa_source_enable_decibel_volume(u->source, FALSE);
1284 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1286 u->source->base_volume = PA_VOLUME_NORM;
1287 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1290 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1293 if (!u->mixer_path->has_mute) {
1294 pa_source_set_get_mute_callback(u->source, NULL);
1295 pa_source_set_set_mute_callback(u->source, NULL);
1296 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1298 pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1299 pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1300 pa_log_info("Using hardware mute control.");
/* Port-switch callback: activate the mixer path attached to port `p`,
 * re-initialize the volume/mute callbacks for it, and select the port's
 * associated setting if any. */
1304 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1305 struct userdata *u = s->userdata;
1306 pa_alsa_port_data *data;
1310 pa_assert(u->mixer_handle);
1312 data = PA_DEVICE_PORT_DATA(p);
1314 pa_assert_se(u->mixer_path = data->path);
1315 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1317 mixer_volume_init(u);
/* presumably guarded by `if (data->setting)` on an elided line — confirm */
1320 pa_alsa_setting_select(data->setting, u->mixer_handle);
/* Called by the core when a stream changes its requested latency:
 * recompute the software parameters (watermarks etc.) accordingly. */
1330 static void source_update_requested_latency_cb(pa_source *s) {
1331 struct userdata *u = s->userdata;
/* NOTE(review): the original comment continues on lines elided from this
 * extract — latency can only be adjusted dynamically under tsched. */
1333 pa_assert(u->use_tsched); /* only when timer scheduling is used
1334 * we can dynamically adjust the
1340 update_sw_params(u);
/* The realtime IO thread: reads capture data from ALSA, programs the next
 * wakeup, and services deferred-volume changes, until it is told to shut
 * down via the thread message queue.
 * NOTE(review): several control-flow lines (braces, else branches, the
 * 'fail:'/'finish:' labels) are elided from this extract. */
1343 static void thread_func(void *userdata) {
1344 struct userdata *u = userdata;
1345 unsigned short revents = 0;
1349 pa_log_debug("Thread starting up");
/* Elevate to RT priority if the daemon is configured for it. */
1351 if (u->core->realtime_scheduling)
1352 pa_make_realtime(u->core->realtime_priority);
/* Make this thread's message queue the one pa_asyncmsgq_* talk to. */
1354 pa_thread_mq_install(&u->thread_mq);
1358 pa_usec_t rtpoll_sleep = 0;
1361 pa_log_debug("Loop");
1364 /* Read some data and pass it to the sources */
1365 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1367 pa_usec_t sleep_usec = 0;
/* TRUE when we woke because the rtpoll timer fired (not an fd event). */
1368 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1371 pa_log_info("Starting capture.");
1372 snd_pcm_start(u->pcm_handle);
/* Restart the clock smoother from "now" after (re)starting capture. */
1374 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
/* mmap vs. plain read path — selection condition elided from extract. */
1380 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1382 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1387 /* pa_log_debug("work_done = %i", work_done); */
1392 if (u->use_tsched) {
1395 /* OK, the capture buffer is now empty, let's
1396 * calculate when to wake up next */
1398 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1400 /* Convert from the sound card time domain to the
1401 * system time domain */
1402 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1404 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1406 /* We don't trust the conversion, so we wake up whatever comes first */
1407 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
/* Deferred ("synchronous") volume: apply pending HW volume changes and
 * possibly shorten the sleep so the next change lands on time. */
1411 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1412 pa_usec_t volume_sleep;
1413 pa_source_volume_change_apply(u->source, &volume_sleep);
1414 if (volume_sleep > 0)
1415 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1418 if (rtpoll_sleep > 0)
1419 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1421 pa_rtpoll_set_timer_disabled(u->rtpoll);
1423 /* Hmm, nothing to do. Let's sleep */
1424 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
/* Flush any volume change that became due while we slept. */
1427 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
1428 pa_source_volume_change_apply(u->source, NULL);
1433 /* Tell ALSA about this and process its response */
1434 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1435 struct pollfd *pollfd;
1439 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
/* Translate raw pollfd revents into PCM-level events. */
1441 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1442 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
/* Anything other than POLLIN (e.g. POLLERR after an xrun/suspend)
 * triggers the recovery path. */
1446 if (revents & ~POLLIN) {
1447 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1452 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1453 pa_log_debug("Wakeup from ALSA!");
1460 /* If this was no regular exit from the loop we have to continue
1461 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1462 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1463 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1466 pa_log_debug("Thread shutting down");
/* Derive the source's registry name, in priority order:
 *   1. explicit "source_name" module argument (namereg must not mangle it),
 *   2. legacy "name" module argument,
 *   3. synthesized "alsa_input.<device>[.<mapping>]" fallback.
 * NOTE(review): declarations of the locals 'n'/'t', some early returns and
 * the pa_xfree of 't' are on lines elided from this extract. */
1469 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1475 pa_assert(device_name);
1477 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1478 pa_source_new_data_set_name(data, n);
/* User picked the name explicitly: fail registration rather than rename. */
1479 data->namereg_fail = TRUE;
1483 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1484 data->namereg_fail = TRUE;
/* Fall back to a name synthesized from the device. */
1486 n = device_id ? device_id : device_name;
/* Auto-generated names may be uniquified by the name registry. */
1487 data->namereg_fail = FALSE;
1491 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1493 t = pa_sprintf_malloc("alsa_input.%s", n);
1495 pa_source_new_data_set_name(data, t);
/* Locate a usable ALSA mixer for the opened PCM and probe either a single
 * synthesized path (explicit "control" element) or the whole path set of
 * the mapping. On any probe failure, fall through to the cleanup below and
 * leave u->mixer_* all NULL (pure software volume).
 * NOTE(review): the early 'return's, 'goto fail' jumps and the 'fail:'
 * label are on lines elided from this extract. */
1499 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
/* Nothing to look for without a mapping or an explicit element. */
1501 if (!mapping && !element)
1504 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1505 pa_log_info("Failed to find a working mixer device.");
/* Explicit element given: synthesize a single capture path for it... */
1511 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1514 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1517 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1518 pa_alsa_path_dump(u->mixer_path);
/* ...otherwise probe every path the mapping defines. */
1521 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1524 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
/* Failure cleanup: drop whichever of path-set / single path we created. */
1531 if (u->mixer_path_set) {
1532 pa_alsa_path_set_free(u->mixer_path_set);
1533 u->mixer_path_set = NULL;
1534 } else if (u->mixer_path) {
1535 pa_alsa_path_free(u->mixer_path);
1536 u->mixer_path = NULL;
1539 if (u->mixer_handle) {
1540 snd_mixer_close(u->mixer_handle);
1541 u->mixer_handle = NULL;
/* Activate the mixer path matching the active port (or the only available
 * path), initialize hardware volume/mute handling, and register mixer
 * element callbacks — on the IO thread's rtpoll for deferred volume, or on
 * the main loop otherwise. Returns 0 on success, negative on failure
 * (return statements are on lines elided from this extract). */
1545 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1546 pa_bool_t need_mixer_callback = FALSE;
/* No mixer found earlier: nothing to set up. */
1550 if (!u->mixer_handle)
1553 if (u->source->active_port) {
1554 pa_alsa_port_data *data;
1556 /* We have a list of supported paths, so let's activate the
1557 * one that has been chosen as active */
1559 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1560 u->mixer_path = data->path;
1562 pa_alsa_path_select(data->path, u->mixer_handle);
1565 pa_alsa_setting_select(data->setting, u->mixer_handle);
/* No active port: pick the first path of the set, if any. */
1569 if (!u->mixer_path && u->mixer_path_set)
1570 u->mixer_path = u->mixer_path_set->paths;
1572 if (u->mixer_path) {
1573 /* Hmm, we have only a single path, then let's activate it */
1575 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1577 if (u->mixer_path->settings)
1578 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
/* Wire up the get/set volume and mute callbacks for the chosen path. */
1583 mixer_volume_init(u);
1585 /* Will we need to register callbacks? */
1586 if (u->mixer_path_set && u->mixer_path_set->paths) {
1589 PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
1590 if (p->has_volume || p->has_mute)
1591 need_mixer_callback = TRUE;
1594 else if (u->mixer_path)
1595 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1597 if (need_mixer_callback) {
1598 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
/* Deferred volume: poll mixer events from the IO thread's rtpoll so
 * HW volume writes stay synchronized with the audio timeline. */
1599 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1600 u->mixer_pd = pa_alsa_mixer_pdata_new();
1601 mixer_callback = io_mixer_callback;
1603 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1604 pa_log("Failed to initialize file descriptor monitoring");
/* Otherwise watch the mixer fds from the main loop. */
1608 u->mixer_fdl = pa_alsa_fdlist_new();
1609 mixer_callback = ctl_mixer_callback;
1611 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1612 pa_log("Failed to initialize file descriptor monitoring");
1617 if (u->mixer_path_set)
1618 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1620 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
/* Module entry point for creating an ALSA capture source.
 * Parses module arguments, opens the PCM (by mapping, by device id, or by
 * device string), finds/probes the mixer, creates the pa_source object,
 * configures latency/watermarks, starts the IO thread, and publishes the
 * source. Returns the new pa_source, or NULL on failure.
 * NOTE(review): error paths ('goto fail', the 'fail:'/'success' labels and
 * several closing braces) are on lines elided from this extract. */
1626 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1628 struct userdata *u = NULL;
1629 const char *dev_id = NULL;
1630 pa_sample_spec ss, requested_ss;
1632 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1633 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1635 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE;
1636 pa_source_new_data data;
1637 pa_alsa_profile_set *profile_set = NULL;
/* --- Sample spec / channel map from defaults, overridable via args --- */
1642 ss = m->core->default_sample_spec;
1643 map = m->core->default_channel_map;
1644 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1645 pa_log("Failed to parse sample specification and channel map");
1650 frame_size = pa_frame_size(&ss);
/* --- Buffer geometry: defaults, then module-argument overrides --- */
1652 nfrags = m->core->default_n_fragments;
1653 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
/* Elided branch above presumably clamps frag_size to at least one frame. */
1655 frag_size = (uint32_t) frame_size;
1656 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1657 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1659 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1660 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1661 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1662 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1663 pa_log("Failed to parse buffer metrics");
1667 buffer_size = nfrags * frag_size;
/* Convert the byte sizes into ALSA frame counts. */
1669 period_frames = frag_size/frame_size;
1670 buffer_frames = buffer_size/frame_size;
1671 tsched_frames = tsched_size/frame_size;
/* --- Boolean module arguments --- */
1673 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1674 pa_log("Failed to parse mmap argument.");
1678 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1679 pa_log("Failed to parse tsched argument.");
1683 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1684 pa_log("Failed to parse ignore_dB argument.");
1688 deferred_volume = m->core->deferred_volume;
1689 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1690 pa_log("Failed to parse deferred_volume argument.");
/* tsched may be vetoed globally (e.g. unsuitable environment). */
1694 use_tsched = pa_alsa_may_tsched(use_tsched);
/* --- Allocate and pre-fill userdata --- */
1696 u = pa_xnew0(struct userdata, 1);
1699 u->use_mmap = use_mmap;
1700 u->use_tsched = use_tsched;
1701 u->deferred_volume = deferred_volume;
1703 u->rtpoll = pa_rtpoll_new();
1704 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1706 u->smoother = pa_smoother_new(
1707 SMOOTHER_ADJUST_USEC,
1708 SMOOTHER_WINDOW_USEC,
1714 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
/* --- Device reservation (prefer device_id over device) --- */
1716 dev_id = pa_modargs_get_value(
1718 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1720 if (reserve_init(u, dev_id) < 0)
1723 if (reserve_monitor_init(u, dev_id) < 0)
/* --- Open the PCM: three mutually exclusive strategies --- */
/* (1) A concrete mapping was handed in: device_id is mandatory. */
1731 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1732 pa_log("device_id= not set");
1736 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1740 SND_PCM_STREAM_CAPTURE,
1741 &period_frames, &buffer_frames, tsched_frames,
/* (2) device_id given but no mapping: auto-detect via a profile set. */
1745 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1747 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1750 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1754 SND_PCM_STREAM_CAPTURE,
1755 &period_frames, &buffer_frames, tsched_frames,
1756 &b, &d, profile_set, &mapping)))
/* (3) Plain ALSA device string ("device" argument or "default"). */
1761 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1762 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1765 SND_PCM_STREAM_CAPTURE,
1766 &period_frames, &buffer_frames, tsched_frames,
1771 pa_assert(u->device_name);
1772 pa_log_info("Successfully opened device %s.", u->device_name);
/* Modems expose PCM devices but are useless as audio sources. */
1774 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1775 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1780 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
/* 'b' = mmap supported, 'd' = tsched usable, as reported by the open. */
1782 if (use_mmap && !b) {
1783 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1784 u->use_mmap = use_mmap = FALSE;
/* tsched requires mmap; drop it if either capability is missing. */
1787 if (use_tsched && (!b || !d)) {
1788 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1789 u->use_tsched = use_tsched = FALSE;
1793 pa_log_info("Successfully enabled mmap() mode.");
1796 pa_log_info("Successfully enabled timer-based scheduling mode.");
1798 /* ALSA might tweak the sample spec, so recalculate the frame size */
1799 frame_size = pa_frame_size(&ss);
1801 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
/* --- Build the pa_source_new_data descriptor --- */
1803 pa_source_new_data_init(&data);
1804 data.driver = driver;
1807 set_source_name(&data, ma, dev_id, u->device_name, mapping);
1809 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1810 * variable instead of using &data.namereg_fail directly, because
1811 * data.namereg_fail is a bitfield and taking the address of a bitfield
1812 * variable is impossible. */
1813 namereg_fail = data.namereg_fail;
1814 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1815 pa_log("Failed to parse namereg_fail argument.");
1816 pa_source_new_data_done(&data);
1819 data.namereg_fail = namereg_fail;
1821 pa_source_new_data_set_sample_spec(&data, &ss);
1822 pa_source_new_data_set_channel_map(&data, &map);
/* Property list: device identity, buffering geometry, access mode. */
1824 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1825 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1826 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1827 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1828 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1831 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1832 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1835 pa_alsa_init_description(data.proplist);
1837 if (u->control_device)
1838 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
/* User-supplied properties override the generated ones. */
1840 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1841 pa_log("Invalid properties");
1842 pa_source_new_data_done(&data);
/* Export one device port per probed mixer path. */
1846 if (u->mixer_path_set)
1847 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1849 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1850 pa_source_new_data_done(&data);
1853 pa_log("Failed to create source object");
/* --- Deferred-volume tuning knobs --- */
1857 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
1858 &u->source->thread_info.volume_change_safety_margin) < 0) {
1859 pa_log("Failed to parse deferred_volume_safety_margin parameter");
1863 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
1864 &u->source->thread_info.volume_change_extra_delay) < 0) {
1865 pa_log("Failed to parse deferred_volume_extra_delay parameter");
/* --- Wire source vtable callbacks and thread plumbing --- */
1869 u->source->parent.process_msg = source_process_msg;
1871 u->source->update_requested_latency = source_update_requested_latency_cb;
1872 u->source->set_state = source_set_state_cb;
1873 u->source->set_port = source_set_port_cb;
1874 u->source->userdata = u;
1876 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1877 pa_source_set_rtpoll(u->source, u->rtpoll);
/* Record the final (possibly ALSA-adjusted) geometry in bytes. */
1879 u->frame_size = frame_size;
1880 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1881 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1882 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1884 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1885 (double) u->hwbuf_size / (double) u->fragment_size,
1886 (long unsigned) u->fragment_size,
1887 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1888 (long unsigned) u->hwbuf_size,
1889 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
/* --- Latency: dynamic watermarks under tsched, fixed otherwise --- */
1891 if (u->use_tsched) {
/* Re-express the watermark in the final sample spec (it was computed
 * against the requested spec, which ALSA may have changed). */
1892 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
1894 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
1895 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
1897 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
1898 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
1900 fix_min_sleep_wakeup(u);
1901 fix_tsched_watermark(u);
1903 pa_source_set_latency_range(u->source,
1905 pa_bytes_to_usec(u->hwbuf_size, &ss));
1907 pa_log_info("Time scheduling watermark is %0.2fms",
1908 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1910 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1914 if (update_sw_params(u) < 0)
1917 if (setup_mixer(u, ignore_dB) < 0)
1920 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
/* --- Start the IO thread --- */
1922 if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
1923 pa_log("Failed to create thread.");
1927 /* Get initial mixer settings */
1928 if (data.volume_is_set) {
1929 if (u->source->set_volume)
1930 u->source->set_volume(u->source);
1932 if (u->source->get_volume)
1933 u->source->get_volume(u->source);
1936 if (data.muted_is_set) {
1937 if (u->source->set_mute)
1938 u->source->set_mute(u->source);
1940 if (u->source->get_mute)
1941 u->source->get_mute(u->source);
1944 if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
1945 u->source->write_volume(u->source);
/* Publish the fully initialized source to clients. */
1947 pa_source_put(u->source);
1950 pa_alsa_profile_set_free(profile_set);
/* Failure path also releases the profile set before returning NULL. */
1960 pa_alsa_profile_set_free(profile_set);
/* Tear down everything pa_alsa_source_new() created, in reverse order:
 * unlink the source, stop the IO thread, then release mixer, PCM, rtpoll
 * and bookkeeping resources. Safe on partially constructed userdata —
 * every teardown is guarded (some guard lines are elided from this
 * extract). */
1965 static void userdata_free(struct userdata *u) {
/* Detach from the core first so no new work arrives. */
1969 pa_source_unlink(u->source);
/* Synchronously stop the IO thread before freeing what it uses. */
1972 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1973 pa_thread_free(u->thread);
1976 pa_thread_mq_done(&u->thread_mq);
1979 pa_source_unref(u->source);
1982 pa_alsa_mixer_pdata_free(u->mixer_pd);
1984 if (u->alsa_rtpoll_item)
1985 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1988 pa_rtpoll_free(u->rtpoll);
/* Drop pending capture data, then close the PCM. */
1990 if (u->pcm_handle) {
1991 snd_pcm_drop(u->pcm_handle);
1992 snd_pcm_close(u->pcm_handle);
1996 pa_alsa_fdlist_free(u->mixer_fdl);
/* A path set owns its paths; only free a standalone path separately. */
1998 if (u->mixer_path_set)
1999 pa_alsa_path_set_free(u->mixer_path_set);
2000 else if (u->mixer_path)
2001 pa_alsa_path_free(u->mixer_path);
2003 if (u->mixer_handle)
2004 snd_mixer_close(u->mixer_handle);
2007 pa_smoother_free(u->smoother);
2012 pa_xfree(u->device_name);
2013 pa_xfree(u->control_device);
2017 void pa_alsa_source_free(pa_source *s) {
2020 pa_source_assert_ref(s);
2021 pa_assert_se(u = s->userdata);