2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
29 #include <asoundlib.h>
31 #include <pulse/rtclock.h>
32 #include <pulse/timeval.h>
33 #include <pulse/volume.h>
34 #include <pulse/xmalloc.h>
36 #include <pulsecore/core.h>
37 #include <pulsecore/i18n.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-rtclock.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/thread.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/time-smoother.h>
52 #include <modules/reserve-wrap.h>
54 #include "alsa-util.h"
55 #include "alsa-source.h"
57 /* #define DEBUG_TIMING */
59 #define DEFAULT_DEVICE "default"
61 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
62 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
64 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
65 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
66 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
67 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
68 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
69 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
71 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
72 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
74 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
75 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
77 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
78 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
80 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
88 pa_thread_mq thread_mq;
91 snd_pcm_t *pcm_handle;
93 pa_alsa_fdlist *mixer_fdl;
94 pa_alsa_mixer_pdata *mixer_pd;
95 snd_mixer_t *mixer_handle;
96 pa_alsa_path_set *mixer_path_set;
97 pa_alsa_path *mixer_path;
99 pa_cvolume hardware_volume;
111 watermark_inc_threshold,
112 watermark_dec_threshold;
114 pa_usec_t watermark_dec_not_before;
116 char *device_name; /* name of the PCM device */
117 char *control_device; /* name of the control device */
119 pa_bool_t use_mmap:1, use_tsched:1, sync_volume:1;
123 pa_rtpoll_item *alsa_rtpoll_item;
125 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
127 pa_smoother *smoother;
129 pa_usec_t smoother_interval;
130 pa_usec_t last_smoother_update;
132 pa_reserve_wrapper *reserve;
133 pa_hook_slot *reserve_slot;
134 pa_reserve_monitor_wrapper *monitor;
135 pa_hook_slot *monitor_slot;
138 static void userdata_free(struct userdata *u);
/* Device-reservation hook: fired when another process requests (or forcibly
 * takes) the audio device. We try to suspend our source to release the PCM;
 * if that fails we veto the takeover by cancelling the hook.
 * NOTE(review): this extraction is missing lines (asserts and the success
 * return path are absent) -- verify against upstream alsa-source.c. */
140 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
144 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
145 return PA_HOOK_CANCEL;
/* Tear down the device-reservation state: disconnect our hook slot and drop
 * our reference on the reserve wrapper. Presumably the fields are also reset
 * to NULL in lines missing from this extraction -- verify upstream. */
150 static void reserve_done(struct userdata *u) {
153 if (u->reserve_slot) {
154 pa_hook_slot_free(u->reserve_slot);
155 u->reserve_slot = NULL;
159 pa_reserve_wrapper_unref(u->reserve);
/* Push the source's human-readable description to the reservation service so
 * other applications can show which device/stream holds the reservation. */
164 static void reserve_update(struct userdata *u) {
165 const char *description;
168 if (!u->source || !u->reserve)
171 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
172 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
/* Acquire the device-reservation lock for PCM device 'dname' and connect
 * reserve_cb to its hook. Skipped entirely in system mode (no session bus).
 * Returns 0 on success (return statements fall outside this extraction). */
175 static int reserve_init(struct userdata *u, const char *dname) {
184 if (pa_in_system_mode())
187 if (!(rname = pa_alsa_get_reserve_name(dname)))
190 /* We are resuming, try to lock the device */
191 u->reserve = pa_reserve_wrapper_get(u->core, rname);
199 pa_assert(!u->reserve_slot);
200 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
/* Reservation-monitor hook: suspend the source while some other process has
 * the device busy -- but only if we do not hold the reservation ourselves. */
205 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
211 b = PA_PTR_TO_UINT(busy) && !u->reserve;
213 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
/* Tear down the reservation-monitor state: free the hook slot and unref the
 * monitor wrapper (mirrors reserve_done() above). */
217 static void monitor_done(struct userdata *u) {
220 if (u->monitor_slot) {
221 pa_hook_slot_free(u->monitor_slot);
222 u->monitor_slot = NULL;
226 pa_reserve_monitor_wrapper_unref(u->monitor);
/* Set up a reservation *monitor* for 'dname' (watching other owners, as
 * opposed to reserve_init() which takes ownership) and connect monitor_cb.
 * Skipped in system mode. */
231 static int reserve_monitor_init(struct userdata *u, const char *dname) {
237 if (pa_in_system_mode())
240 if (!(rname = pa_alsa_get_reserve_name(dname)))
243 /* We are resuming, try to lock the device */
244 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
250 pa_assert(!u->monitor_slot);
251 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
/* Recompute min_sleep/min_wakeup (in bytes) for timer-based scheduling:
 * convert the TSCHED_MIN_* constants to bytes for the current sample spec,
 * then clamp each between one frame and half of the usable buffer. */
256 static void fix_min_sleep_wakeup(struct userdata *u) {
257 size_t max_use, max_use_2;
260 pa_assert(u->use_tsched);
262 max_use = u->hwbuf_size - u->hwbuf_unused;
263 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
265 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
266 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
268 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
269 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
/* Clamp the wakeup watermark into its valid range:
 * min_wakeup <= tsched_watermark <= max_use - min_sleep.
 * Called whenever buffer geometry or the watermark itself changes. */
272 static void fix_tsched_watermark(struct userdata *u) {
275 pa_assert(u->use_tsched);
277 max_use = u->hwbuf_size - u->hwbuf_unused;
279 if (u->tsched_watermark > max_use - u->min_sleep)
280 u->tsched_watermark = max_use - u->min_sleep;
282 if (u->tsched_watermark < u->min_wakeup)
283 u->tsched_watermark = u->min_wakeup;
/* React to an overrun/deadline miss: grow the wakeup watermark (double it,
 * capped at +watermark_inc_step). If the watermark is already at its ceiling
 * (fix_tsched_watermark() pinned it), grow the source's minimal latency
 * instead, capped at max_latency. If neither can grow, nothing more can be
 * done. */
286 static void increase_watermark(struct userdata *u) {
287 size_t old_watermark;
288 pa_usec_t old_min_latency, new_min_latency;
291 pa_assert(u->use_tsched);
293 /* First, just try to increase the watermark */
294 old_watermark = u->tsched_watermark;
295 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
296 fix_tsched_watermark(u);
298 if (old_watermark != u->tsched_watermark) {
299 pa_log_info("Increasing wakeup watermark to %0.2f ms",
300 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
304 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
305 old_min_latency = u->source->thread_info.min_latency;
306 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
307 new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
309 if (old_min_latency != new_min_latency) {
310 pa_log_info("Increasing minimal latency to %0.2f ms",
311 (double) new_min_latency / PA_USEC_PER_MSEC);
313 pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
316 /* When we reach this we're officialy fucked! */
/* Opportunistically shrink the wakeup watermark after a period of stable
 * operation. Rate-limited via watermark_dec_not_before: the first call only
 * arms the timestamp, and further decreases are deferred until
 * TSCHED_WATERMARK_VERIFY_AFTER_USEC has passed. The watermark shrinks by
 * watermark_dec_step, but never below half its current value. */
319 static void decrease_watermark(struct userdata *u) {
320 size_t old_watermark;
324 pa_assert(u->use_tsched);
326 now = pa_rtclock_now();
328 if (u->watermark_dec_not_before <= 0)
331 if (u->watermark_dec_not_before > now)
334 old_watermark = u->tsched_watermark;
336 if (u->tsched_watermark < u->watermark_dec_step)
337 u->tsched_watermark = u->tsched_watermark / 2;
339 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
341 fix_tsched_watermark(u);
343 if (old_watermark != u->tsched_watermark)
344 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
345 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
347 /* We don't change the latency range*/
350 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
/* Compute how long the IO thread may sleep (*sleep_usec) and how much time
 * it must reserve for processing (*process_usec), derived from the requested
 * latency (falling back to the full hw buffer) and the current watermark.
 * NOTE(review): the assignment to *process_usec is missing from this
 * extraction -- verify against upstream. */
353 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
356 pa_assert(sleep_usec);
357 pa_assert(process_usec);
360 pa_assert(u->use_tsched);
362 usec = pa_source_get_requested_latency_within_thread(u->source);
364 if (usec == (pa_usec_t) -1)
365 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
367 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
372 *sleep_usec = usec - wm;
376 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
377 (unsigned long) (usec / PA_USEC_PER_MSEC),
378 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
379 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
/* Attempt to recover the PCM from an error returned by 'call' via
 * snd_pcm_recover() (handles -EPIPE overrun and -ESTRPIPE suspend; silent
 * mode). -EAGAIN must never reach this function -- callers handle it.
 * Presumably returns 0 on successful recovery and negative otherwise; the
 * return statements fall outside this extraction. */
383 static int try_recover(struct userdata *u, const char *call, int err) {
388 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
390 pa_assert(err != -EAGAIN);
393 pa_log_debug("%s: Buffer overrun!", call);
395 if (err == -ESTRPIPE)
396 pa_log_debug("%s: System suspended!", call);
398 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
399 pa_log("%s: %s", call, pa_alsa_strerror(err));
/* Given n_bytes available to read, compute how much capture headroom remains
 * before the hw buffer overruns, detect overruns, and adapt the watermark:
 * grow it after an overrun or when headroom dips below the inc threshold;
 * shrink it (only on a genuine timer wakeup) when headroom stays above the
 * dec threshold. Returns the remaining headroom in bytes. */
407 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
408 size_t left_to_record;
409 size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
410 pa_bool_t overrun = FALSE;
412 /* We use <= instead of < for this check here because an overrun
413 * only happens after the last sample was processed, not already when
414 * it is removed from the buffer. This is particularly important
415 * when block transfer is used. */
417 if (n_bytes <= rec_space)
418 left_to_record = rec_space - n_bytes;
421 /* We got a dropout. What a mess! */
429 if (pa_log_ratelimit(PA_LOG_INFO))
430 pa_log_info("Overrun!");
434 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
438 pa_bool_t reset_not_before = TRUE;
440 if (overrun || left_to_record < u->watermark_inc_threshold)
441 increase_watermark(u);
442 else if (left_to_record > u->watermark_dec_threshold) {
443 reset_not_before = FALSE;
445 /* We decrease the watermark only if have actually
446 * been woken up by a timeout. If something else woke
447 * us up it's too easy to fulfill the deadlines... */
450 decrease_watermark(u);
453 if (reset_not_before)
454 u->watermark_dec_not_before = 0;
457 return left_to_record;
/* Capture loop for mmap access mode: repeatedly query the available frames,
 * map the hw buffer region (snd_pcm_mmap_begin), wrap it zero-copy in a
 * fixed memblock, post it to the source, and commit the frames back
 * (snd_pcm_mmap_commit). On success *sleep_usec is set to how long the
 * caller may sleep (headroom minus one watermark of processing margin).
 * Returns 1 if any data was moved, 0 otherwise; error/recovery return paths
 * fall partly outside this extraction. 'polled' indicates a POLLIN wakeup
 * (used to diagnose spurious driver wakeups), 'on_timeout' a timer wakeup. */
460 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
461 pa_bool_t work_done = FALSE;
462 pa_usec_t max_sleep_usec = 0, process_usec = 0;
463 size_t left_to_record;
467 pa_source_assert_ref(u->source);
470 hw_sleep_time(u, &max_sleep_usec, &process_usec);
476 pa_bool_t after_avail = TRUE;
478 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
480 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
486 n_bytes = (size_t) n * u->frame_size;
489 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
492 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
497 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
499 pa_log_debug("Not reading, because too early.");
504 if (PA_UNLIKELY(n_bytes <= 0)) {
508 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
509 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
510 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
511 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
517 pa_log_debug("Not reading, because not necessary.");
525 pa_log_debug("Not filling up, because already too many iterations.");
534 pa_log_debug("Reading");
541 const snd_pcm_channel_area_t *areas;
542 snd_pcm_uframes_t offset, frames;
543 snd_pcm_sframes_t sframes;
545 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
546 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
548 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
550 if (!after_avail && err == -EAGAIN)
553 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
559 /* Make sure that if these memblocks need to be copied they will fit into one slot */
560 if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
561 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
563 if (!after_avail && frames == 0)
566 pa_assert(frames > 0);
569 /* Check these are multiples of 8 bit */
570 pa_assert((areas[0].first & 7) == 0);
571 pa_assert((areas[0].step & 7)== 0);
573 /* We assume a single interleaved memory buffer */
574 pa_assert((areas[0].first >> 3) == 0);
575 pa_assert((areas[0].step >> 3) == u->frame_size);
577 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
579 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
580 chunk.length = pa_memblock_get_length(chunk.memblock);
583 pa_source_post(u->source, &chunk);
584 pa_memblock_unref_fixed(chunk.memblock);
586 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
588 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
596 u->read_count += frames * u->frame_size;
599 pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
602 if ((size_t) frames * u->frame_size >= n_bytes)
605 n_bytes -= (size_t) frames * u->frame_size;
610 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
611 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
613 if (*sleep_usec > process_usec)
614 *sleep_usec -= process_usec;
619 return work_done ? 1 : 0;
/* Capture loop for plain read access mode: same structure as mmap_read(),
 * but copies data with snd_pcm_readi() into a freshly allocated memblock
 * before posting it to the source. Returns 1 if any data was moved, 0
 * otherwise; *sleep_usec is set as in mmap_read(). Error/recovery return
 * paths fall partly outside this extraction. */
622 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
623 int work_done = FALSE;
624 pa_usec_t max_sleep_usec = 0, process_usec = 0;
625 size_t left_to_record;
629 pa_source_assert_ref(u->source);
632 hw_sleep_time(u, &max_sleep_usec, &process_usec);
638 pa_bool_t after_avail = TRUE;
640 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
642 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
648 n_bytes = (size_t) n * u->frame_size;
649 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
654 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
657 if (PA_UNLIKELY(n_bytes <= 0)) {
661 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
662 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
663 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
664 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
674 pa_log_debug("Not filling up, because already too many iterations.");
684 snd_pcm_sframes_t frames;
687 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
689 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
691 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
692 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
694 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
696 p = pa_memblock_acquire(chunk.memblock);
697 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
698 pa_memblock_release(chunk.memblock);
700 if (PA_UNLIKELY(frames < 0)) {
701 pa_memblock_unref(chunk.memblock);
703 if (!after_avail && (int) frames == -EAGAIN)
706 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
712 if (!after_avail && frames == 0) {
713 pa_memblock_unref(chunk.memblock);
717 pa_assert(frames > 0);
721 chunk.length = (size_t) frames * u->frame_size;
723 pa_source_post(u->source, &chunk);
724 pa_memblock_unref(chunk.memblock);
728 u->read_count += frames * u->frame_size;
730 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
732 if ((size_t) frames * u->frame_size >= n_bytes)
735 n_bytes -= (size_t) frames * u->frame_size;
740 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
741 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
743 if (*sleep_usec > process_usec)
744 *sleep_usec -= process_usec;
749 return work_done ? 1 : 0;
/* Feed the time smoother with a (system time, sound-card time) sample pair:
 * system time comes from the PCM status htimestamp (falling back to
 * pa_rtclock_now() if the driver left it at 0), sound-card time from the
 * total byte position (read_count plus current delay). Updates are throttled
 * by smoother_interval, which backs off exponentially up to
 * SMOOTHER_MAX_INTERVAL. */
752 static void update_smoother(struct userdata *u) {
753 snd_pcm_sframes_t delay = 0;
756 pa_usec_t now1 = 0, now2;
757 snd_pcm_status_t *status;
759 snd_pcm_status_alloca(&status);
762 pa_assert(u->pcm_handle);
764 /* Let's update the time smoother */
766 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
767 pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
771 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
772 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
774 snd_htimestamp_t htstamp = { 0, 0 };
775 snd_pcm_status_get_htstamp(status, &htstamp);
776 now1 = pa_timespec_load(&htstamp);
779 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
781 now1 = pa_rtclock_now();
783 /* check if the time since the last update is bigger than the interval */
784 if (u->last_smoother_update > 0)
785 if (u->last_smoother_update + u->smoother_interval > now1)
788 position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
789 now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
791 pa_smoother_put(u->smoother, now1, now2);
793 u->last_smoother_update = now1;
794 /* exponentially increase the update interval up to the MAX limit */
795 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
/* Estimate the source latency: the smoother's interpolated sound-card time
 * minus the time corresponding to bytes already posted (read_count),
 * clamped to zero. */
798 static pa_usec_t source_get_latency(struct userdata *u) {
800 pa_usec_t now1, now2;
804 now1 = pa_rtclock_now();
805 now2 = pa_smoother_get(u->smoother, now1);
807 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
809 return delay >= 0 ? (pa_usec_t) delay : 0;
/* (Re)create the rtpoll item that polls the PCM's file descriptors, freeing
 * any previous one first. Return statements fall outside this extraction. */
812 static int build_pollfd(struct userdata *u) {
814 pa_assert(u->pcm_handle);
816 if (u->alsa_rtpoll_item)
817 pa_rtpoll_item_free(u->alsa_rtpoll_item);
819 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
825 /* Called from IO context */
/* Suspend the device: pause the time smoother, close the PCM handle and drop
 * the ALSA rtpoll item. unsuspend() reverses this. */
826 static int suspend(struct userdata *u) {
828 pa_assert(u->pcm_handle);
830 pa_smoother_pause(u->smoother, pa_rtclock_now());
833 snd_pcm_close(u->pcm_handle);
834 u->pcm_handle = NULL;
836 if (u->alsa_rtpoll_item) {
837 pa_rtpoll_item_free(u->alsa_rtpoll_item);
838 u->alsa_rtpoll_item = NULL;
841 pa_log_info("Device suspended...");
846 /* Called from IO context */
/* Recompute the software parameters from the currently requested latency:
 * derive hwbuf_unused (buffer space we deliberately leave idle), re-fix the
 * tsched sleep/watermark values, then compute avail_min and apply it with
 * pa_alsa_set_sw_params(). Returns 0 on success (success return falls
 * outside this extraction), negative if applying the sw params fails. */
847 static int update_sw_params(struct userdata *u) {
848 snd_pcm_uframes_t avail_min;
853 /* Use the full buffer if no one asked us for anything specific */
859 if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
862 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
864 b = pa_usec_to_bytes(latency, &u->source->sample_spec);
866 /* We need at least one sample in our buffer */
868 if (PA_UNLIKELY(b < u->frame_size))
871 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
874 fix_min_sleep_wakeup(u);
875 fix_tsched_watermark(u);
878 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
883 pa_usec_t sleep_usec, process_usec;
885 hw_sleep_time(u, &sleep_usec, &process_usec);
886 avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
889 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
891 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
892 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
899 /* Called from IO context */
/* Resume from suspend: reopen the capture PCM (with auto-conversion plugins
 * disabled so we get the raw device), reapply hw params, and verify that the
 * access mode, sample spec, and fragment/buffer geometry all match what we
 * had before suspending -- if anything differs, resume fails and the PCM is
 * closed again (error path at the bottom). On success the sw params and
 * pollfd are rebuilt and the smoother is reset. */
900 static int unsuspend(struct userdata *u) {
904 snd_pcm_uframes_t period_size, buffer_size;
907 pa_assert(!u->pcm_handle);
909 pa_log_info("Trying resume...");
911 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
913 SND_PCM_NO_AUTO_RESAMPLE|
914 SND_PCM_NO_AUTO_CHANNELS|
915 SND_PCM_NO_AUTO_FORMAT)) < 0) {
916 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
920 ss = u->source->sample_spec;
921 period_size = u->fragment_size / u->frame_size;
922 buffer_size = u->hwbuf_size / u->frame_size;
926 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
927 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
931 if (b != u->use_mmap || d != u->use_tsched) {
932 pa_log_warn("Resume failed, couldn't get original access mode.");
936 if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
937 pa_log_warn("Resume failed, couldn't restore original sample settings.");
941 if (period_size*u->frame_size != u->fragment_size ||
942 buffer_size*u->frame_size != u->hwbuf_size) {
943 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
944 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
945 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
949 if (update_sw_params(u) < 0)
952 if (build_pollfd(u) < 0)
955 /* FIXME: We need to reload the volume somehow */
958 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
959 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
960 u->last_smoother_update = 0;
964 pa_log_info("Resumed successfully...");
/* Error path: close the half-opened PCM before returning failure. */
970 snd_pcm_close(u->pcm_handle);
971 u->pcm_handle = NULL;
977 /* Called from IO context */
/* IO-thread message handler: answers GET_LATENCY from the smoother estimate
 * and performs suspend()/unsuspend()/build_pollfd() on state transitions,
 * then delegates everything else to the generic pa_source_process_msg(). */
978 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
979 struct userdata *u = PA_SOURCE(o)->userdata;
983 case PA_SOURCE_MESSAGE_GET_LATENCY: {
987 r = source_get_latency(u);
989 *((pa_usec_t*) data) = r;
994 case PA_SOURCE_MESSAGE_SET_STATE:
996 switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
998 case PA_SOURCE_SUSPENDED: {
1001 pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1003 if ((r = suspend(u)) < 0)
1009 case PA_SOURCE_IDLE:
1010 case PA_SOURCE_RUNNING: {
1013 if (u->source->thread_info.state == PA_SOURCE_INIT) {
1014 if (build_pollfd(u) < 0)
1018 if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1019 if ((r = unsuspend(u)) < 0)
1026 case PA_SOURCE_UNLINKED:
1027 case PA_SOURCE_INIT:
1028 case PA_SOURCE_INVALID_STATE:
1035 return pa_source_process_msg(o, code, data, offset, chunk);
1038 /* Called from main context */
/* Manage the device reservation across state changes: release it when going
 * from an opened state to SUSPENDED (release call falls outside this
 * extraction) and re-acquire it when resuming; resume fails with
 * -PA_ERR_BUSY if the device cannot be re-reserved. */
1039 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1040 pa_source_state_t old_state;
1043 pa_source_assert_ref(s);
1044 pa_assert_se(u = s->userdata);
1046 old_state = pa_source_get_state(u->source);
1048 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1050 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1051 if (reserve_init(u, u->device_name) < 0)
1052 return -PA_ERR_BUSY;
/* Mixer-element callback used when volume is handled in the main context:
 * on a value change, re-read volume and mute from hardware (forced refresh).
 * Events are ignored while the element is being removed or the session is
 * suspended. */
1057 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1058 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1061 pa_assert(u->mixer_handle);
1063 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1066 if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1069 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1070 pa_source_get_volume(u->source, TRUE);
1071 pa_source_get_mute(u->source, TRUE);
/* Mixer-element callback used when volume is handled in the IO context
 * (synchronous volume): delegates refresh to
 * pa_source_update_volume_and_mute(). Same remove/session guards as
 * ctl_mixer_callback(). */
1077 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1078 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1081 pa_assert(u->mixer_handle);
1083 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1086 if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1089 if (mask & SND_CTL_EVENT_MASK_VALUE)
1090 pa_source_update_volume_and_mute(u->source);
/* Source get-volume callback: read the hardware volume via the active mixer
 * path, scale it by base_volume so that 0 dB maps to PA_VOLUME_NORM, and if
 * it changed behind our back, adopt it as real_volume and (on dB-capable
 * paths) reset the software volume. */
1095 static void source_get_volume_cb(pa_source *s) {
1096 struct userdata *u = s->userdata;
1098 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1101 pa_assert(u->mixer_path);
1102 pa_assert(u->mixer_handle);
1104 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1107 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1108 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1110 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1112 if (u->mixer_path->has_dB) {
1113 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1115 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1118 if (pa_cvolume_equal(&u->hardware_volume, &r))
1121 s->real_volume = u->hardware_volume = r;
1123 /* Hmm, so the hardware volume changed, let's reset our software volume */
1124 if (u->mixer_path->has_dB)
1125 pa_source_set_soft_volume(s, NULL);
/* Source set-volume callback: write real_volume to hardware through the
 * mixer path (deferred when PA_SOURCE_SYNC_VOLUME is set), then -- on
 * dB-capable paths -- compute the residual software volume that makes up the
 * difference between what was requested and what the hardware could apply,
 * skipping it when within VOLUME_ACCURACY. */
1128 static void source_set_volume_cb(pa_source *s) {
1129 struct userdata *u = s->userdata;
1131 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1132 pa_bool_t sync_volume = !!(s->flags & PA_SOURCE_SYNC_VOLUME);
1135 pa_assert(u->mixer_path);
1136 pa_assert(u->mixer_handle);
1138 /* Shift up by the base volume */
1139 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1141 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, sync_volume, !sync_volume) < 0)
1144 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1145 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1147 u->hardware_volume = r;
1149 if (u->mixer_path->has_dB) {
1150 pa_cvolume new_soft_volume;
1151 pa_bool_t accurate_enough;
1152 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1154 /* Match exactly what the user requested by software */
1155 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1157 /* If the adjustment to do in software is only minimal we
1158 * can skip it. That saves us CPU at the expense of a bit of
1161 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1162 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1164 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1165 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1166 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1167 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1168 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1169 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1170 pa_yes_no(accurate_enough));
1171 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1173 if (!accurate_enough)
1174 s->soft_volume = new_soft_volume;
1177 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1179 /* We can't match exactly what the user requested, hence let's
1180 * at least tell the user about it */
/* Deferred hardware-volume writer used with synchronous volume
 * (PA_SOURCE_SYNC_VOLUME): writes thread_info.current_hw_volume to the mixer
 * (scaled through base_volume) and logs a debug warning if the value the
 * hardware actually accepted differs from the request by more than
 * VOLUME_ACCURACY. */
1186 static void source_write_volume_cb(pa_source *s) {
1187 struct userdata *u = s->userdata;
1188 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1191 pa_assert(u->mixer_path);
1192 pa_assert(u->mixer_handle);
1193 pa_assert(s->flags & PA_SOURCE_SYNC_VOLUME);
1195 /* Shift up by the base volume */
1196 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1198 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1199 pa_log_error("Writing HW volume failed");
1202 pa_bool_t accurate_enough;
1204 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1205 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1207 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1209 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1210 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1212 if (!accurate_enough) {
1214 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1215 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1218 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1219 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1220 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1221 pa_log_debug(" in dB: %s (request) != %s",
1222 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1223 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
/* Source get-mute callback: read the mute switch through the active mixer
 * path (the assignment to s->muted falls outside this extraction). */
1228 static void source_get_mute_cb(pa_source *s) {
1229 struct userdata *u = s->userdata;
1233 pa_assert(u->mixer_path);
1234 pa_assert(u->mixer_handle);
1236 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
/* Source set-mute callback: push s->muted to the hardware mute switch via
 * the active mixer path. */
1242 static void source_set_mute_cb(pa_source *s) {
1243 struct userdata *u = s->userdata;
1246 pa_assert(u->mixer_path);
1247 pa_assert(u->mixer_handle);
1249 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
/* Install (or clear) the source's volume and mute callbacks according to
 * what the active mixer path supports:
 *  - no hardware volume -> software volume, all volume callbacks cleared;
 *  - dB-capable path    -> decibel volume, base_volume derived from max_dB,
 *                          and synchronous (IO-thread) writes if enabled;
 *  - linear-only path   -> integer steps spanning min..max volume.
 * Mute is handled analogously at the bottom. */
1252 static void mixer_volume_init(struct userdata *u) {
1255 if (!u->mixer_path->has_volume) {
1256 pa_source_set_write_volume_callback(u->source, NULL);
1257 pa_source_set_get_volume_callback(u->source, NULL);
1258 pa_source_set_set_volume_callback(u->source, NULL);
1260 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1262 pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1263 pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1265 if (u->mixer_path->has_dB && u->sync_volume) {
1266 pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1267 pa_log_info("Successfully enabled synchronous volume.");
1269 pa_source_set_write_volume_callback(u->source, NULL);
1271 if (u->mixer_path->has_dB) {
1272 pa_source_enable_decibel_volume(u->source, TRUE);
1273 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1275 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1276 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1278 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1280 pa_source_enable_decibel_volume(u->source, FALSE);
1281 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1283 u->source->base_volume = PA_VOLUME_NORM;
1284 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1287 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1290 if (!u->mixer_path->has_mute) {
1291 pa_source_set_get_mute_callback(u->source, NULL);
1292 pa_source_set_set_mute_callback(u->source, NULL);
1293 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1295 pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1296 pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1297 pa_log_info("Using hardware mute control.");
/* Source set-port callback: switch the active mixer path to the one stored
 * in the port's data, select it on the mixer, re-initialize the volume/mute
 * callbacks for the new path, and apply the port's mixer setting if any. */
1301 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1302 struct userdata *u = s->userdata;
1303 pa_alsa_port_data *data;
1307 pa_assert(u->mixer_handle);
1309 data = PA_DEVICE_PORT_DATA(p);
1311 pa_assert_se(u->mixer_path = data->path);
1312 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1314 mixer_volume_init(u);
1317 pa_alsa_setting_select(data->setting, u->mixer_handle);
/* pa_source::update_requested_latency callback — recomputes the software
 * parameters (watermarks / avail_min) when a client changes its latency
 * request.  Only meaningful in timer-based scheduling mode, where latency
 * is adjustable at runtime.
 *
 * NOTE(review): a NULL-check on u and the closing brace are not visible in
 * this excerpt. */
1327 static void source_update_requested_latency_cb(pa_source *s) {
1328 struct userdata *u = s->userdata;
1330 pa_assert(u->use_tsched); /* only when timer scheduling is used
1331 * we can dynamically adjust the
1337 update_sw_params(u);
/* Real-time IO thread for the ALSA capture source.
 *
 * Main loop (visible parts): read captured audio from the PCM device and
 * post it downstream, compute the next wakeup time (timer-based scheduling)
 * or rely on ALSA poll descriptors (IRQ-based scheduling), then sleep in
 * pa_rtpoll_run() until there is more work.
 *
 * NOTE(review): this excerpt is missing many lines of the original loop
 * (the "for (;;)" header, error/finish labels, use_mmap branch selection,
 * several closing braces); comments describe only the visible statements. */
1340 static void thread_func(void *userdata) {
1341 struct userdata *u = userdata;
1342 unsigned short revents = 0;
1346 pa_log_debug("Thread starting up");
/* Elevate to RT priority if the daemon is configured for it — capture
 * deadlines are missed otherwise under load. */
1348 if (u->core->realtime_scheduling)
1349 pa_make_realtime(u->core->realtime_priority);
/* Make the thread's message queue current so messages land here. */
1351 pa_thread_mq_install(&u->thread_mq);
1355 pa_usec_t rtpoll_sleep = 0;
1358 pa_log_debug("Loop");
1361 /* Read some data and pass it to the sources */
1362 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1364 pa_usec_t sleep_usec = 0;
/* on_timeout: distinguishes a timer-driven wakeup from a poll() wakeup,
 * used by the read path to decide watermark adjustments. */
1365 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1368 pa_log_info("Starting capture.");
1369 snd_pcm_start(u->pcm_handle);
/* Resume the rate smoother now that the clock is running again. */
1371 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
/* mmap vs. plain read() path — the selecting "if (u->use_mmap)" line is
 * not visible in this excerpt. */
1377 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1379 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1384 /* pa_log_debug("work_done = %i", work_done); */
1389 if (u->use_tsched) {
1392 /* OK, the capture buffer is now empty, let's
1393 * calculate when to wake up next */
1395 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1397 /* Convert from the sound card time domain to the
1398 * system time domain */
1399 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1401 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1403 /* We don't trust the conversion, so we wake up whatever comes first */
1404 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
/* Deferred (synchronous) hardware volume writes may need to happen before
 * the computed wakeup — shorten the sleep if so. */
1408 if (u->source->flags & PA_SOURCE_SYNC_VOLUME) {
1409 pa_usec_t volume_sleep;
1410 pa_source_volume_change_apply(u->source, &volume_sleep);
1411 if (volume_sleep > 0)
1412 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1415 if (rtpoll_sleep > 0)
1416 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
/* presumably the "else" branch of the rtpoll_sleep test — the else line
 * itself is not visible here. */
1418 pa_rtpoll_set_timer_disabled(u->rtpoll);
1420 /* Hmm, nothing to do. Let's sleep */
1421 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
/* Flush any remaining deferred volume changes after waking up. */
1424 if (u->source->flags & PA_SOURCE_SYNC_VOLUME)
1425 pa_source_volume_change_apply(u->source, NULL);
1430 /* Tell ALSA about this and process its response */
1431 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1432 struct pollfd *pollfd;
1436 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
/* Translate raw poll() revents into PCM-device revents. */
1438 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1439 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
/* Anything beyond POLLIN (POLLERR and friends) means an xrun or device
 * error — attempt recovery before continuing. */
1443 if (revents & ~POLLIN) {
1444 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1449 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1450 pa_log_debug("Wakeup from ALSA!");
1457 /* If this was no regular exit from the loop we have to continue
1458 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1459 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1460 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1463 pa_log_debug("Thread shutting down");
/* Chooses the source's registry name, in priority order:
 *   1. explicit "source_name" module argument (registration must not fail
 *      over to an auto-generated name — namereg_fail = TRUE),
 *   2. the "name" module argument,
 *   3. an auto-generated "alsa_input.<device>[.<mapping>]" name, for which
 *      fallback renaming is allowed (namereg_fail = FALSE).
 *
 * NOTE(review): asserts, "return" statements after each branch, the
 * mapping NULL-check and the pa_xfree(t) cleanup are not visible in this
 * excerpt. */
1466 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1472 pa_assert(device_name);
1474 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1475 pa_source_new_data_set_name(data, n);
1476 data->namereg_fail = TRUE;
1480 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1481 data->namereg_fail = TRUE;
/* Fall back to an auto-generated name based on the device. */
1483 n = device_id ? device_id : device_name;
1484 data->namereg_fail = FALSE;
/* Include the mapping name when a specific mapping was selected —
 * the "if (mapping)" guard line is not visible in this excerpt. */
1488 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1490 t = pa_sprintf_malloc("alsa_input.%s", n);
1492 pa_source_new_data_set_name(data, t);
/* Opens the hardware mixer for the PCM device and probes a usable volume
 * path: either a single synthesized path for an explicit "control=" element,
 * or the full path set belonging to the mapping.  On any failure, falls
 * through to cleanup that releases whatever was allocated so the source
 * continues with software volume only.
 *
 * NOTE(review): the "if (element)" / "else" selector lines, goto fail
 * statements and several braces are missing from this excerpt. */
1496 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
/* Nothing to probe without a mapping or an explicit mixer element. */
1498 if (!mapping && !element)
1501 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1502 pa_log_info("Failed to find a working mixer device.");
/* Explicit "control=" element: synthesize and probe a single input path. */
1508 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1511 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1514 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1515 pa_alsa_path_dump(u->mixer_path);
/* Otherwise probe every candidate path defined by the mapping. */
1518 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1521 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
/* Failure cleanup (reached via goto in the full source): free whichever
 * of path-set / single path was created, then close the mixer handle. */
1528 if (u->mixer_path_set) {
1529 pa_alsa_path_set_free(u->mixer_path_set);
1530 u->mixer_path_set = NULL;
1531 } else if (u->mixer_path) {
1532 pa_alsa_path_free(u->mixer_path);
1533 u->mixer_path = NULL;
1536 if (u->mixer_handle) {
1537 snd_mixer_close(u->mixer_handle);
1538 u->mixer_handle = NULL;
/* Activates the chosen mixer path and wires up volume/mute change
 * notification:
 *   - selects the path belonging to the active port (or the single/first
 *     probed path when there are no ports),
 *   - initializes the volume callbacks via mixer_volume_init(),
 *   - registers a mixer-element callback, dispatched either from the IO
 *     thread's rtpoll (synchronous/deferred volume) or from the main loop.
 * Returns 0 on success, negative on error (error paths not fully visible).
 *
 * NOTE(review): asserts, "return 0/-1" lines and several braces are
 * missing from this excerpt. */
1542 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1543 pa_bool_t need_mixer_callback = FALSE;
/* No mixer found during probing — nothing to set up. */
1547 if (!u->mixer_handle)
1550 if (u->source->active_port) {
1551 pa_alsa_port_data *data;
1553 /* We have a list of supported paths, so let's activate the
1554 * one that has been chosen as active */
1556 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1557 u->mixer_path = data->path;
1559 pa_alsa_path_select(data->path, u->mixer_handle);
/* presumably guarded by "if (data->setting)" in the full source. */
1562 pa_alsa_setting_select(data->setting, u->mixer_handle);
/* No active port: fall back to the first (or only) probed path. */
1566 if (!u->mixer_path && u->mixer_path_set)
1567 u->mixer_path = u->mixer_path_set->paths;
1569 if (u->mixer_path) {
1570 /* Hmm, we have only a single path, then let's activate it */
1572 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1574 if (u->mixer_path->settings)
1575 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1580 mixer_volume_init(u);
1582 /* Will we need to register callbacks? */
1583 if (u->mixer_path_set && u->mixer_path_set->paths) {
1586 PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
1587 if (p->has_volume || p->has_mute)
1588 need_mixer_callback = TRUE;
1591 else if (u->mixer_path)
1592 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1594 if (need_mixer_callback) {
1595 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
/* Synchronous volume: mixer events are polled from the RT IO thread. */
1596 if (u->source->flags & PA_SOURCE_SYNC_VOLUME) {
1597 u->mixer_pd = pa_alsa_mixer_pdata_new();
1598 mixer_callback = io_mixer_callback;
1600 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1601 pa_log("Failed to initialize file descriptor monitoring");
/* Otherwise mixer events are dispatched from the main loop via an fdlist. */
1605 u->mixer_fdl = pa_alsa_fdlist_new();
1606 mixer_callback = ctl_mixer_callback;
1608 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1609 pa_log("Failed to initialize file descriptor monitoring");
1614 if (u->mixer_path_set)
1615 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1617 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
/* Creates and initializes an ALSA capture source.
 *
 * High-level flow (visible parts): parse module arguments (sample spec,
 * buffer metrics, mmap/tsched/dB/sync-volume switches) -> open the PCM
 * device by mapping, device id, or device string -> probe the mixer ->
 * build and register the pa_source -> configure latency/watermarks ->
 * set up the mixer -> spawn the RT IO thread -> apply initial volume and
 * mute -> pa_source_put().  Returns the new source, or NULL on failure.
 *
 * NOTE(review): this excerpt omits many lines of the original function
 * ("goto fail" statements, the fail/finish labels, several closing braces
 * and variable declarations such as map/frame_size/t); comments below
 * describe only the visible statements. */
1623 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1625 struct userdata *u = NULL;
1626 const char *dev_id = NULL;
1627 pa_sample_spec ss, requested_ss;
1629 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1630 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1632 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE;
1633 pa_source_new_data data;
1634 pa_alsa_profile_set *profile_set = NULL;
/* Start from the daemon defaults, then let module arguments override. */
1639 ss = m->core->default_sample_spec;
1640 map = m->core->default_channel_map;
1641 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1642 pa_log("Failed to parse sample specification and channel map");
1647 frame_size = pa_frame_size(&ss);
1649 nfrags = m->core->default_n_fragments;
1650 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
/* Guarantee the fragment holds at least one frame — the guard condition
 * ("if (frag_size <= 0)") is not visible in this excerpt. */
1652 frag_size = (uint32_t) frame_size;
1653 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1654 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1656 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1657 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1658 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1659 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1660 pa_log("Failed to parse buffer metrics");
1664 buffer_size = nfrags * frag_size;
/* Convert byte sizes to frame counts for the ALSA open helpers. */
1666 period_frames = frag_size/frame_size;
1667 buffer_frames = buffer_size/frame_size;
1668 tsched_frames = tsched_size/frame_size;
1670 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1671 pa_log("Failed to parse mmap argument.");
1675 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1676 pa_log("Failed to parse tsched argument.");
1680 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1681 pa_log("Failed to parse ignore_dB argument.");
1685 sync_volume = m->core->sync_volume;
1686 if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
1687 pa_log("Failed to parse sync_volume argument.");
/* Timer-based scheduling may be globally unavailable (e.g. no high-res
 * timers) — let the helper veto it. */
1691 use_tsched = pa_alsa_may_tsched(use_tsched);
1693 u = pa_xnew0(struct userdata, 1);
1696 u->use_mmap = use_mmap;
1697 u->use_tsched = use_tsched;
1698 u->sync_volume = sync_volume;
1700 u->rtpoll = pa_rtpoll_new();
1701 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
/* Smoother tracks the sound-card clock against the system clock so that
 * latency/wakeup estimates can be translated between the two domains. */
1703 u->smoother = pa_smoother_new(
1704 SMOOTHER_ADJUST_USEC,
1705 SMOOTHER_WINDOW_USEC,
1711 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1713 dev_id = pa_modargs_get_value(
1715 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
/* Take the device reservation (and monitor it) before touching ALSA so
 * other audio servers do not fight over the hardware. */
1717 if (reserve_init(u, dev_id) < 0)
1720 if (reserve_monitor_init(u, dev_id) < 0)
/* --- Open the PCM device, trying three strategies in order: by explicit
 * mapping, by device id with auto profile, or by raw device string. The
 * selecting "if (mapping)" line is not visible in this excerpt. --- */
1728 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1729 pa_log("device_id= not set");
1733 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1737 SND_PCM_STREAM_CAPTURE,
1738 &period_frames, &buffer_frames, tsched_frames,
1742 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1744 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1747 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1751 SND_PCM_STREAM_CAPTURE,
1752 &period_frames, &buffer_frames, tsched_frames,
1753 &b, &d, profile_set, &mapping)))
1758 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1759 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1762 SND_PCM_STREAM_CAPTURE,
1763 &period_frames, &buffer_frames, tsched_frames,
1768 pa_assert(u->device_name);
1769 pa_log_info("Successfully opened device %s.", u->device_name);
/* Modems expose PCM devices too, but capturing from them is useless. */
1771 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1772 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1777 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
/* b/d are out-params from the open helpers: whether mmap and tsched were
 * actually achievable on this device. */
1779 if (use_mmap && !b) {
1780 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1781 u->use_mmap = use_mmap = FALSE;
1784 if (use_tsched && (!b || !d)) {
1785 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1786 u->use_tsched = use_tsched = FALSE;
1790 pa_log_info("Successfully enabled mmap() mode.");
1793 pa_log_info("Successfully enabled timer-based scheduling mode.");
1795 /* ALSA might tweak the sample spec, so recalculate the frame size */
1796 frame_size = pa_frame_size(&ss);
1798 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
/* --- Build the pa_source object. --- */
1800 pa_source_new_data_init(&data);
1801 data.driver = driver;
1804 set_source_name(&data, ma, dev_id, u->device_name, mapping);
1806 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1807 * variable instead of using &data.namereg_fail directly, because
1808 * data.namereg_fail is a bitfield and taking the address of a bitfield
1809 * variable is impossible. */
1810 namereg_fail = data.namereg_fail;
1811 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1812 pa_log("Failed to parse namereg_fail argument.");
1813 pa_source_new_data_done(&data);
1816 data.namereg_fail = namereg_fail;
1818 pa_source_new_data_set_sample_spec(&data, &ss);
1819 pa_source_new_data_set_channel_map(&data, &map);
/* Publish device facts as source properties for clients/UIs. */
1821 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1822 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1823 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1824 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1825 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1828 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1829 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1832 pa_alsa_init_description(data.proplist);
1834 if (u->control_device)
1835 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1837 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1838 pa_log("Invalid properties");
1839 pa_source_new_data_done(&data);
/* Expose the probed mixer paths as selectable device ports. */
1843 if (u->mixer_path_set)
1844 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1846 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1847 pa_source_new_data_done(&data);
1850 pa_log("Failed to create source object");
1854 if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
1855 &u->source->thread_info.volume_change_safety_margin) < 0) {
1856 pa_log("Failed to parse sync_volume_safety_margin parameter");
1860 if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
1861 &u->source->thread_info.volume_change_extra_delay) < 0) {
1862 pa_log("Failed to parse sync_volume_extra_delay parameter");
/* Wire up the source's virtual methods. */
1866 u->source->parent.process_msg = source_process_msg;
1868 u->source->update_requested_latency = source_update_requested_latency_cb;
1869 u->source->set_state = source_set_state_cb;
1870 u->source->set_port = source_set_port_cb;
1871 u->source->userdata = u;
1873 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1874 pa_source_set_rtpoll(u->source, u->rtpoll);
/* Record the final (possibly ALSA-adjusted) buffer geometry. */
1876 u->frame_size = frame_size;
1877 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1878 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1879 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1881 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1882 (double) u->hwbuf_size / (double) u->fragment_size,
1883 (long unsigned) u->fragment_size,
1884 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1885 (long unsigned) u->hwbuf_size,
1886 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1888 if (u->use_tsched) {
/* Scale the watermark from the requested spec to the spec ALSA granted. */
1889 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
1891 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
1892 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
1894 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
1895 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
1897 fix_min_sleep_wakeup(u);
1898 fix_tsched_watermark(u);
/* Dynamic latency in tsched mode, fixed (= full buffer) otherwise. */
1900 pa_source_set_latency_range(u->source,
1902 pa_bytes_to_usec(u->hwbuf_size, &ss));
1904 pa_log_info("Time scheduling watermark is %0.2fms",
1905 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1907 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1911 if (update_sw_params(u) < 0)
1914 if (setup_mixer(u, ignore_dB) < 0)
1917 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1919 if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
1920 pa_log("Failed to create thread.");
1924 /* Get initial mixer settings */
1925 if (data.volume_is_set) {
1926 if (u->source->set_volume)
1927 u->source->set_volume(u->source);
1929 if (u->source->get_volume)
1930 u->source->get_volume(u->source);
1933 if (data.muted_is_set) {
1934 if (u->source->set_mute)
1935 u->source->set_mute(u->source);
1937 if (u->source->get_mute)
1938 u->source->get_mute(u->source);
1941 if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
1942 u->source->write_volume(u->source);
1944 pa_source_put(u->source);
/* Success path frees the temporary profile set; the failure path (reached
 * via goto fail in the full source) does the same after teardown. */
1947 pa_alsa_profile_set_free(profile_set);
1957 pa_alsa_profile_set_free(profile_set);
/* Tears down a source's userdata: unlinks the source, stops the IO thread,
 * and releases every resource in reverse order of acquisition.  Each free
 * is guarded (in the full source) by a NULL check, so this is safe to call
 * on a partially constructed userdata from the constructor's failure path.
 *
 * NOTE(review): the guarding "if (u->...)" lines for several frees, the
 * reserve/monitor teardown and the final pa_xfree(u) are not visible in
 * this excerpt. */
1962 static void userdata_free(struct userdata *u) {
1966 pa_source_unlink(u->source);
/* Synchronously shut down the RT thread before freeing what it uses. */
1969 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1970 pa_thread_free(u->thread);
1973 pa_thread_mq_done(&u->thread_mq);
1976 pa_source_unref(u->source);
1979 pa_alsa_mixer_pdata_free(u->mixer_pd);
1981 if (u->alsa_rtpoll_item)
1982 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1985 pa_rtpoll_free(u->rtpoll);
/* Stop capture before closing so the device is left in a clean state. */
1987 if (u->pcm_handle) {
1988 snd_pcm_drop(u->pcm_handle);
1989 snd_pcm_close(u->pcm_handle);
1993 pa_alsa_fdlist_free(u->mixer_fdl);
/* A path-set owns its paths; only free a standalone path when no set. */
1995 if (u->mixer_path_set)
1996 pa_alsa_path_set_free(u->mixer_path_set);
1997 else if (u->mixer_path)
1998 pa_alsa_path_free(u->mixer_path);
2000 if (u->mixer_handle)
2001 snd_mixer_close(u->mixer_handle);
2004 pa_smoother_free(u->smoother);
2009 pa_xfree(u->device_name);
2010 pa_xfree(u->control_device);
2014 void pa_alsa_source_free(pa_source *s) {
2017 pa_source_assert_ref(s);
2018 pa_assert_se(u = s->userdata);