2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
29 #include <asoundlib.h>
31 #include <pulse/i18n.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/xmalloc.h>
37 #include <pulsecore/core-error.h>
38 #include <pulsecore/core.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/time-smoother.h>
54 #include <modules/reserve-wrap.h>
56 #include "alsa-util.h"
57 #include "alsa-source.h"
59 /* #define DEBUG_TIMING */
61 #define DEFAULT_DEVICE "default"
63 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
64 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
66 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
67 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
68 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
69 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
70 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
71 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
73 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
74 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
76 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
77 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
79 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
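/* Per-source runtime state: the PCM and mixer handles, the timer-scheduling
 * watermarks, the time smoother used for latency interpolation, and the
 * device reservation/monitor hooks. */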
87 pa_thread_mq thread_mq;
90 snd_pcm_t *pcm_handle;
92 pa_alsa_fdlist *mixer_fdl;
93 snd_mixer_t *mixer_handle;
94 pa_alsa_path_set *mixer_path_set;
95 pa_alsa_path *mixer_path;
97 pa_cvolume hardware_volume;
109 watermark_inc_threshold,
110 watermark_dec_threshold;
112 pa_usec_t watermark_dec_not_before;
115 char *control_device;
117 pa_bool_t use_mmap:1, use_tsched:1;
119 pa_rtpoll_item *alsa_rtpoll_item;
121 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
123 pa_smoother *smoother;
125 pa_usec_t smoother_interval;
126 pa_usec_t last_smoother_update;
128 pa_reserve_wrapper *reserve;
129 pa_hook_slot *reserve_slot;
130 pa_reserve_monitor_wrapper *monitor;
131 pa_hook_slot *monitor_slot;
134 static void userdata_free(struct userdata *u);
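/* Reservation hook: another application requested (or forced) the audio
 * device, so suspend the source to release the PCM; refuse the hand-over if
 * suspending fails. */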
136 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
140 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
141 return PA_HOOK_CANCEL;
146 static void reserve_done(struct userdata *u) {
149 if (u->reserve_slot) {
150 pa_hook_slot_free(u->reserve_slot);
151 u->reserve_slot = NULL;
155 pa_reserve_wrapper_unref(u->reserve);
160 static void reserve_update(struct userdata *u) {
161 const char *description;
164 if (!u->source || !u->reserve)
167 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
168 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
171 static int reserve_init(struct userdata *u, const char *dname) {
180 if (pa_in_system_mode())
183 /* Try to lock the device via the device reservation mechanism */
184 if (!(rname = pa_alsa_get_reserve_name(dname)))
187 u->reserve = pa_reserve_wrapper_get(u->core, rname);
195 pa_assert(!u->reserve_slot);
196 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
201 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
207 b = PA_PTR_TO_UINT(busy) && !u->reserve;
209 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
213 static void monitor_done(struct userdata *u) {
216 if (u->monitor_slot) {
217 pa_hook_slot_free(u->monitor_slot);
218 u->monitor_slot = NULL;
222 pa_reserve_monitor_wrapper_unref(u->monitor);
227 static int reserve_monitor_init(struct userdata *u, const char *dname) {
233 if (pa_in_system_mode())
236 /* Watch whether some other application holds the device lock */
237 if (!(rname = pa_alsa_get_reserve_name(dname)))
240 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
246 pa_assert(!u->monitor_slot);
247 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
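/* Derive the minimum sleep and wakeup margins (in bytes) from
 * TSCHED_MIN_SLEEP_USEC/TSCHED_MIN_WAKEUP_USEC and clamp them between one
 * frame and half of the usable hardware buffer. */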
252 static void fix_min_sleep_wakeup(struct userdata *u) {
253 size_t max_use, max_use_2;
255 pa_assert(u->use_tsched);
257 max_use = u->hwbuf_size - u->hwbuf_unused;
258 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
260 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
261 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
263 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
264 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
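/* Keep the timer-scheduling watermark inside the usable buffer: at most
 * max_use - min_sleep, at least min_wakeup. */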
267 static void fix_tsched_watermark(struct userdata *u) {
270 pa_assert(u->use_tsched);
272 max_use = u->hwbuf_size - u->hwbuf_unused;
274 if (u->tsched_watermark > max_use - u->min_sleep)
275 u->tsched_watermark = max_use - u->min_sleep;
277 if (u->tsched_watermark < u->min_wakeup)
278 u->tsched_watermark = u->min_wakeup;
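/* We woke up too late (or overran): first try to enlarge the wakeup
 * watermark; once it cannot grow any further, raise the source's minimum
 * latency instead. */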
281 static void increase_watermark(struct userdata *u) {
282 size_t old_watermark;
283 pa_usec_t old_min_latency, new_min_latency;
286 pa_assert(u->use_tsched);
288 /* First, just try to increase the watermark */
289 old_watermark = u->tsched_watermark;
290 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
291 fix_tsched_watermark(u);
293 if (old_watermark != u->tsched_watermark) {
294 pa_log_info("Increasing wakeup watermark to %0.2f ms",
295 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
299 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
300 old_min_latency = u->source->thread_info.min_latency;
301 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
302 new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
304 if (old_min_latency != new_min_latency) {
305 pa_log_info("Increasing minimal latency to %0.2f ms",
306 (double) new_min_latency / PA_USEC_PER_MSEC);
308 pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
311 /* When we reach this we're officially fucked! */
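/* We have consistently been waking up with plenty of headroom, so carefully
 * shrink the watermark again, rate-limited by watermark_dec_not_before. */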
314 static void decrease_watermark(struct userdata *u) {
315 size_t old_watermark;
319 pa_assert(u->use_tsched);
321 now = pa_rtclock_now();
323 if (u->watermark_dec_not_before <= 0)
326 if (u->watermark_dec_not_before > now)
329 old_watermark = u->tsched_watermark;
331 if (u->tsched_watermark < u->watermark_dec_step)
332 u->tsched_watermark = u->tsched_watermark / 2;
334 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
336 fix_tsched_watermark(u);
338 if (old_watermark != u->tsched_watermark)
339 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
340 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
342 /* We don't change the latency range */
345 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
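/* Compute how long we may sleep before the next timer wakeup: the requested
 * latency (or the full buffer time if none was requested) minus the wakeup
 * watermark. */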
348 static pa_usec_t hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
351 pa_assert(sleep_usec);
352 pa_assert(process_usec);
355 pa_assert(u->use_tsched);
357 usec = pa_source_get_requested_latency_within_thread(u->source);
359 if (usec == (pa_usec_t) -1)
360 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
362 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
367 *sleep_usec = usec - wm;
371 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
372 (unsigned long) (usec / PA_USEC_PER_MSEC),
373 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
374 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
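/* Log the ALSA error, let snd_pcm_recover() deal with overruns and system
 * suspends, and restart capture afterwards. */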
380 static int try_recover(struct userdata *u, const char *call, int err) {
385 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
387 pa_assert(err != -EAGAIN);
390 pa_log_debug("%s: Buffer overrun!", call);
392 if (err == -ESTRPIPE)
393 pa_log_debug("%s: System suspended!", call);
395 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
396 pa_log("%s: %s", call, pa_alsa_strerror(err));
400 snd_pcm_start(u->pcm_handle);
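/* Translate the number of bytes ALSA reports as ready into the space still
 * left before an overrun, and grow or shrink the watermark based on it. */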
404 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
405 size_t left_to_record;
406 size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
407 pa_bool_t overrun = FALSE;
409 /* We use <= instead of < for this check here because an overrun
410 * only happens after the last sample was processed, not already when
411 * it is removed from the buffer. This is particularly important
412 * when block transfer is used. */
414 if (n_bytes <= rec_space)
415 left_to_record = rec_space - n_bytes;
418 /* We got a dropout. What a mess! */
426 if (pa_log_ratelimit())
427 pa_log_info("Overrun!");
431 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
435 pa_bool_t reset_not_before = TRUE;
437 if (overrun || left_to_record < u->watermark_inc_threshold)
438 increase_watermark(u);
439 else if (left_to_record > u->watermark_dec_threshold) {
440 reset_not_before = FALSE;
442 /* We decrease the watermark only if we have actually been
443 * woken up by a timeout. If something else woke us up
444 * it's too easy to fulfill the deadlines... */
447 decrease_watermark(u);
450 if (reset_not_before)
451 u->watermark_dec_not_before = 0;
454 return left_to_record;
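/* mmap capture path: post the captured audio straight out of the mmap'ed
 * hardware buffer as fixed memblocks, then commit the frames back to ALSA. */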
457 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
458 pa_bool_t work_done = FALSE;
459 pa_usec_t max_sleep_usec = 0, process_usec = 0;
460 size_t left_to_record;
464 pa_source_assert_ref(u->source);
467 hw_sleep_time(u, &max_sleep_usec, &process_usec);
473 pa_bool_t after_avail = TRUE;
475 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
477 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
483 n_bytes = (size_t) n * u->frame_size;
486 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
489 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
494 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
496 pa_log_debug("Not reading, because too early.");
501 if (PA_UNLIKELY(n_bytes <= 0)) {
505 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
506 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
507 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
508 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
514 pa_log_debug("Not reading, because not necessary.");
521 pa_log_debug("Not filling up, because already too many iterations.");
530 pa_log_debug("Reading");
535 const snd_pcm_channel_area_t *areas;
536 snd_pcm_uframes_t offset, frames;
539 snd_pcm_sframes_t sframes;
541 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
543 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
545 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
547 if (!after_avail && err == -EAGAIN)
550 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
556 /* Make sure that if these memblocks need to be copied they will fit into one slot */
557 if (frames > pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size)
558 frames = pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size;
560 if (!after_avail && frames == 0)
563 pa_assert(frames > 0);
566 /* Check these are multiples of 8 bits */
567 pa_assert((areas[0].first & 7) == 0);
568 pa_assert((areas[0].step & 7) == 0);
570 /* We assume a single interleaved memory buffer */
571 pa_assert((areas[0].first >> 3) == 0);
572 pa_assert((areas[0].step >> 3) == u->frame_size);
574 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
576 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
577 chunk.length = pa_memblock_get_length(chunk.memblock);
580 pa_source_post(u->source, &chunk);
581 pa_memblock_unref_fixed(chunk.memblock);
583 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
585 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
593 u->read_count += frames * u->frame_size;
596 pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
599 if ((size_t) frames * u->frame_size >= n_bytes)
602 n_bytes -= (size_t) frames * u->frame_size;
606 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
608 if (*sleep_usec > process_usec)
609 *sleep_usec -= process_usec;
613 return work_done ? 1 : 0;
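/* Plain read-style capture path: copy data with snd_pcm_readi() into newly
 * allocated memblocks, for devices where mmap is unavailable. */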
616 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
617 pa_bool_t work_done = FALSE;
618 pa_usec_t max_sleep_usec = 0, process_usec = 0;
619 size_t left_to_record;
623 pa_source_assert_ref(u->source);
626 hw_sleep_time(u, &max_sleep_usec, &process_usec);
632 pa_bool_t after_avail = TRUE;
634 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
636 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
642 n_bytes = (size_t) n * u->frame_size;
643 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
648 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
651 if (PA_UNLIKELY(n_bytes <= 0)) {
655 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
656 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
657 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
658 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
668 pa_log_debug("Not filling up, because already too many iterations.");
678 snd_pcm_sframes_t frames;
681 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
683 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
685 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
686 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
688 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
690 p = pa_memblock_acquire(chunk.memblock);
691 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
692 pa_memblock_release(chunk.memblock);
694 if (PA_UNLIKELY(frames < 0)) {
695 pa_memblock_unref(chunk.memblock);
697 if (!after_avail && (int) frames == -EAGAIN)
700 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
706 if (!after_avail && frames == 0) {
707 pa_memblock_unref(chunk.memblock);
711 pa_assert(frames > 0);
715 chunk.length = (size_t) frames * u->frame_size;
717 pa_source_post(u->source, &chunk);
718 pa_memblock_unref(chunk.memblock);
722 u->read_count += frames * u->frame_size;
724 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
726 if ((size_t) frames * u->frame_size >= n_bytes)
729 n_bytes -= (size_t) frames * u->frame_size;
733 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
735 if (*sleep_usec > process_usec)
736 *sleep_usec -= process_usec;
740 return work_done ? 1 : 0;
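/* Feed the time smoother with a fresh (system time, sound card time) pair
 * derived from the PCM delay, backing off exponentially between updates. */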
743 static void update_smoother(struct userdata *u) {
744 snd_pcm_sframes_t delay = 0;
747 pa_usec_t now1 = 0, now2;
748 snd_pcm_status_t *status;
750 snd_pcm_status_alloca(&status);
753 pa_assert(u->pcm_handle);
755 /* Let's update the time smoother */
757 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
758 pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
762 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
763 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
765 snd_htimestamp_t htstamp = { 0, 0 };
766 snd_pcm_status_get_htstamp(status, &htstamp);
767 now1 = pa_timespec_load(&htstamp);
770 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
772 now1 = pa_rtclock_now();
774 /* check if the time since the last update is bigger than the interval */
775 if (u->last_smoother_update > 0)
776 if (u->last_smoother_update + u->smoother_interval > now1)
779 position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
780 now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
782 pa_smoother_put(u->smoother, now1, now2);
784 u->last_smoother_update = now1;
785 /* exponentially increase the update interval up to the MAX limit */
786 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
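/* Latency is the smoothed sound card time minus the amount of data we have
 * already posted to the source. */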
789 static pa_usec_t source_get_latency(struct userdata *u) {
791 pa_usec_t now1, now2;
795 now1 = pa_rtclock_now();
796 now2 = pa_smoother_get(u->smoother, now1);
798 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
800 return delay >= 0 ? (pa_usec_t) delay : 0;
803 static int build_pollfd(struct userdata *u) {
805 pa_assert(u->pcm_handle);
807 if (u->alsa_rtpoll_item)
808 pa_rtpoll_item_free(u->alsa_rtpoll_item);
810 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
816 static int suspend(struct userdata *u) {
818 pa_assert(u->pcm_handle);
820 pa_smoother_pause(u->smoother, pa_rtclock_now());
823 snd_pcm_close(u->pcm_handle);
824 u->pcm_handle = NULL;
826 if (u->alsa_rtpoll_item) {
827 pa_rtpoll_item_free(u->alsa_rtpoll_item);
828 u->alsa_rtpoll_item = NULL;
831 pa_log_info("Device suspended...");
836 static int update_sw_params(struct userdata *u) {
837 snd_pcm_uframes_t avail_min;
842 /* Use the full buffer if no one asked us for anything specific */
848 if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
851 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
853 b = pa_usec_to_bytes(latency, &u->source->sample_spec);
855 /* We need at least one sample in our buffer */
857 if (PA_UNLIKELY(b < u->frame_size))
860 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
863 fix_min_sleep_wakeup(u);
864 fix_tsched_watermark(u);
867 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
872 pa_usec_t sleep_usec, process_usec;
874 hw_sleep_time(u, &sleep_usec, &process_usec);
875 avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
878 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
880 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
881 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
888 static int unsuspend(struct userdata *u) {
892 snd_pcm_uframes_t period_size, buffer_size;
895 pa_assert(!u->pcm_handle);
897 pa_log_info("Trying resume...");
899 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
901 SND_PCM_NO_AUTO_RESAMPLE|
902 SND_PCM_NO_AUTO_CHANNELS|
903 SND_PCM_NO_AUTO_FORMAT)) < 0) {
904 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
908 ss = u->source->sample_spec;
909 period_size = u->fragment_size / u->frame_size;
910 buffer_size = u->hwbuf_size / u->frame_size;
914 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
915 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
919 if (b != u->use_mmap || d != u->use_tsched) {
920 pa_log_warn("Resume failed, couldn't get original access mode.");
924 if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
925 pa_log_warn("Resume failed, couldn't restore original sample settings.");
929 if (period_size*u->frame_size != u->fragment_size ||
930 buffer_size*u->frame_size != u->hwbuf_size) {
931 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New: %lu/%lu)",
932 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
933 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
937 if (update_sw_params(u) < 0)
940 if (build_pollfd(u) < 0)
943 /* FIXME: We need to reload the volume somehow */
945 snd_pcm_start(u->pcm_handle);
948 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
949 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
950 u->last_smoother_update = 0;
952 pa_log_info("Resumed successfully...");
958 snd_pcm_close(u->pcm_handle);
959 u->pcm_handle = NULL;
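/* Messages handled in the IO thread: latency queries and the suspend/resume
 * work triggered by state changes. */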
965 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
966 struct userdata *u = PA_SOURCE(o)->userdata;
970 case PA_SOURCE_MESSAGE_GET_LATENCY: {
974 r = source_get_latency(u);
976 *((pa_usec_t*) data) = r;
981 case PA_SOURCE_MESSAGE_SET_STATE:
983 switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
985 case PA_SOURCE_SUSPENDED: {
987 pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
989 if ((r = suspend(u)) < 0)
996 case PA_SOURCE_RUNNING: {
999 if (u->source->thread_info.state == PA_SOURCE_INIT) {
1000 if (build_pollfd(u) < 0)
1003 snd_pcm_start(u->pcm_handle);
1006 if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1007 if ((r = unsuspend(u)) < 0)
1014 case PA_SOURCE_UNLINKED:
1015 case PA_SOURCE_INIT:
1016 case PA_SOURCE_INVALID_STATE:
1023 return pa_source_process_msg(o, code, data, offset, chunk);
1026 /* Called from main context */
1027 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1028 pa_source_state_t old_state;
1031 pa_source_assert_ref(s);
1032 pa_assert_se(u = s->userdata);
1034 old_state = pa_source_get_state(u->source);
1036 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1038 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1039 if (reserve_init(u, u->device_name) < 0)
1040 return -PA_ERR_BUSY;
1045 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1046 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1049 pa_assert(u->mixer_handle);
1051 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1054 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1055 pa_source_get_volume(u->source, TRUE);
1056 pa_source_get_mute(u->source, TRUE);
1062 static void source_get_volume_cb(pa_source *s) {
1063 struct userdata *u = s->userdata;
1065 char t[PA_CVOLUME_SNPRINT_MAX];
1068 pa_assert(u->mixer_path);
1069 pa_assert(u->mixer_handle);
1071 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1074 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1075 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1077 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1079 if (pa_cvolume_equal(&u->hardware_volume, &r))
1082 s->volume = u->hardware_volume = r;
1084 /* Hmm, so the hardware volume changed, let's reset our software volume */
1085 if (u->mixer_path->has_dB)
1086 pa_source_set_soft_volume(s, NULL);
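/* Write the requested volume to the hardware mixer; whatever cannot be
 * matched exactly in hardware ends up in the software volume (only when a dB
 * scale is available). */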
1089 static void source_set_volume_cb(pa_source *s) {
1090 struct userdata *u = s->userdata;
1092 char t[PA_CVOLUME_SNPRINT_MAX];
1095 pa_assert(u->mixer_path);
1096 pa_assert(u->mixer_handle);
1098 /* Shift up by the base volume */
1099 pa_sw_cvolume_divide_scalar(&r, &s->volume, s->base_volume);
1101 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1104 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1105 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1107 u->hardware_volume = r;
1109 if (u->mixer_path->has_dB) {
1110 pa_cvolume new_soft_volume;
1111 pa_bool_t accurate_enough;
1113 /* Match exactly what the user requested by software */
1114 pa_sw_cvolume_divide(&new_soft_volume, &s->volume, &u->hardware_volume);
1116 /* If the adjustment to do in software is only minimal we
1117 * can skip it. That saves us CPU at the expense of a bit of
1120 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1121 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1123 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
1124 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1125 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
1126 pa_yes_no(accurate_enough));
1128 if (!accurate_enough)
1129 s->soft_volume = new_soft_volume;
1132 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1134 /* We can't match exactly what the user requested, hence let's
1135 * at least tell the user about it */
1141 static void source_get_mute_cb(pa_source *s) {
1142 struct userdata *u = s->userdata;
1146 pa_assert(u->mixer_path);
1147 pa_assert(u->mixer_handle);
1149 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1155 static void source_set_mute_cb(pa_source *s) {
1156 struct userdata *u = s->userdata;
1159 pa_assert(u->mixer_path);
1160 pa_assert(u->mixer_handle);
1162 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1165 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1166 struct userdata *u = s->userdata;
1167 pa_alsa_port_data *data;
1171 pa_assert(u->mixer_handle);
1173 data = PA_DEVICE_PORT_DATA(p);
1175 pa_assert_se(u->mixer_path = data->path);
1176 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1178 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1179 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1180 s->n_volume_steps = PA_VOLUME_NORM+1;
1182 if (u->mixer_path->max_dB > 0.0)
1183 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1185 pa_log_info("No particular base volume set, fixing to 0 dB");
1187 s->base_volume = PA_VOLUME_NORM;
1188 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1192 pa_alsa_setting_select(data->setting, u->mixer_handle);
1202 static void source_update_requested_latency_cb(pa_source *s) {
1203 struct userdata *u = s->userdata;
1209 update_sw_params(u);
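/* The IO thread: read from the device whenever it is open, then sleep on the
 * rtpoll, with the wakeup timer translated from sound card time to system
 * time via the smoother. */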
1212 static void thread_func(void *userdata) {
1213 struct userdata *u = userdata;
1214 unsigned short revents = 0;
1218 pa_log_debug("Thread starting up");
1220 if (u->core->realtime_scheduling)
1221 pa_make_realtime(u->core->realtime_priority);
1223 pa_thread_mq_install(&u->thread_mq);
1229 pa_log_debug("Loop");
1232 /* Read some data and pass it to the sources */
1233 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1235 pa_usec_t sleep_usec = 0;
1236 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1239 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1241 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1246 /* pa_log_debug("work_done = %i", work_done); */
1251 if (u->use_tsched) {
1254 /* OK, the capture buffer is now empty, let's
1255 * calculate when to wake up next */
1257 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1259 /* Convert from the sound card time domain to the
1260 * system time domain */
1261 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1263 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1265 /* We don't trust the conversion, so we wake up whatever comes first */
1266 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
1268 } else if (u->use_tsched)
1270 /* OK, we're in an invalid state, let's disable our timers */
1271 pa_rtpoll_set_timer_disabled(u->rtpoll);
1273 /* Hmm, nothing to do. Let's sleep */
1274 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1280 /* Tell ALSA about this and process its response */
1281 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1282 struct pollfd *pollfd;
1286 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1288 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1289 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1293 if (revents & ~POLLIN) {
1294 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1297 snd_pcm_start(u->pcm_handle);
1298 } else if (revents && u->use_tsched && pa_log_ratelimit())
1299 pa_log_debug("Wakeup from ALSA!");
1306 /* If this was not a regular exit from the loop we have to continue
1307 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1308 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1309 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1312 pa_log_debug("Thread shutting down");
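/* Choose the source name: an explicit source_name=/name= module argument
 * wins, otherwise synthesize alsa_input.<device>[.<mapping>]. */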
1315 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1321 pa_assert(device_name);
1323 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1324 pa_source_new_data_set_name(data, n);
1325 data->namereg_fail = TRUE;
1329 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1330 data->namereg_fail = TRUE;
1332 n = device_id ? device_id : device_name;
1333 data->namereg_fail = FALSE;
1337 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1339 t = pa_sprintf_malloc("alsa_input.%s", n);
1341 pa_source_new_data_set_name(data, t);
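/* Open a mixer for the PCM and probe either a single synthesized path for the
 * requested element or the whole path set of the mapping. */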
1345 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1347 if (!mapping && !element)
1350 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1351 pa_log_info("Failed to find a working mixer device.");
1357 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1360 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1363 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1364 pa_alsa_path_dump(u->mixer_path);
1367 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1370 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1372 pa_log_debug("Probed mixer paths:");
1373 pa_alsa_path_set_dump(u->mixer_path_set);
1380 if (u->mixer_path_set) {
1381 pa_alsa_path_set_free(u->mixer_path_set);
1382 u->mixer_path_set = NULL;
1383 } else if (u->mixer_path) {
1384 pa_alsa_path_free(u->mixer_path);
1385 u->mixer_path = NULL;
1388 if (u->mixer_handle) {
1389 snd_mixer_close(u->mixer_handle);
1390 u->mixer_handle = NULL;
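/* Hook the probed mixer path up to the source: select the active path/port,
 * derive base volume and volume steps, install the volume/mute callbacks and
 * start watching mixer events. */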
1394 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1397 if (!u->mixer_handle)
1400 if (u->source->active_port) {
1401 pa_alsa_port_data *data;
1403 /* We have a list of supported paths, so let's activate the
1404 * one that has been chosen as active */
1406 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1407 u->mixer_path = data->path;
1409 pa_alsa_path_select(data->path, u->mixer_handle);
1412 pa_alsa_setting_select(data->setting, u->mixer_handle);
1416 if (!u->mixer_path && u->mixer_path_set)
1417 u->mixer_path = u->mixer_path_set->paths;
1419 if (u->mixer_path) {
1420 /* Hmm, we have only a single path, so let's activate it */
1422 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1424 if (u->mixer_path->settings)
1425 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1430 if (!u->mixer_path->has_volume)
1431 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1434 if (u->mixer_path->has_dB) {
1435 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1437 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1438 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1440 if (u->mixer_path->max_dB > 0.0)
1441 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1443 pa_log_info("No particular base volume set, fixing to 0 dB");
1446 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1447 u->source->base_volume = PA_VOLUME_NORM;
1448 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1451 u->source->get_volume = source_get_volume_cb;
1452 u->source->set_volume = source_set_volume_cb;
1454 u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SOURCE_DECIBEL_VOLUME : 0);
1455 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1458 if (!u->mixer_path->has_mute) {
1459 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1461 u->source->get_mute = source_get_mute_cb;
1462 u->source->set_mute = source_set_mute_cb;
1463 u->source->flags |= PA_SOURCE_HW_MUTE_CTRL;
1464 pa_log_info("Using hardware mute control.");
1467 u->mixer_fdl = pa_alsa_fdlist_new();
1469 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1470 pa_log("Failed to initialize file descriptor monitoring");
1474 if (u->mixer_path_set)
1475 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1477 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1482 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1484 struct userdata *u = NULL;
1485 const char *dev_id = NULL;
1486 pa_sample_spec ss, requested_ss;
1488 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1489 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1491 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1492 pa_source_new_data data;
1493 pa_alsa_profile_set *profile_set = NULL;
1498 ss = m->core->default_sample_spec;
1499 map = m->core->default_channel_map;
1500 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1501 pa_log("Failed to parse sample specification");
1506 frame_size = pa_frame_size(&ss);
1508 nfrags = m->core->default_n_fragments;
1509 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1511 frag_size = (uint32_t) frame_size;
1512 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1513 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1515 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1516 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1517 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1518 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1519 pa_log("Failed to parse buffer metrics");
1523 buffer_size = nfrags * frag_size;
1525 period_frames = frag_size/frame_size;
1526 buffer_frames = buffer_size/frame_size;
1527 tsched_frames = tsched_size/frame_size;
1529 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1530 pa_log("Failed to parse mmap argument.");
1534 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1535 pa_log("Failed to parse timer_scheduling argument.");
1539 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1540 pa_log("Failed to parse ignore_dB argument.");
1544 use_tsched = pa_alsa_may_tsched(use_tsched);
1546 u = pa_xnew0(struct userdata, 1);
1549 u->use_mmap = use_mmap;
1550 u->use_tsched = use_tsched;
1551 u->rtpoll = pa_rtpoll_new();
1552 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1554 u->smoother = pa_smoother_new(
1555 DEFAULT_TSCHED_WATERMARK_USEC*2,
1556 DEFAULT_TSCHED_WATERMARK_USEC*2,
1562 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1564 dev_id = pa_modargs_get_value(
1566 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1568 if (reserve_init(u, dev_id) < 0)
1571 if (reserve_monitor_init(u, dev_id) < 0)
1579 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1580 pa_log("device_id= not set");
1584 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1588 SND_PCM_STREAM_CAPTURE,
1589 &period_frames, &buffer_frames, tsched_frames,
1593 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1595 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1598 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1602 SND_PCM_STREAM_CAPTURE,
1603 &period_frames, &buffer_frames, tsched_frames,
1604 &b, &d, profile_set, &mapping)))
1609 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1610 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1613 SND_PCM_STREAM_CAPTURE,
1614 &period_frames, &buffer_frames, tsched_frames,
1619 pa_assert(u->device_name);
1620 pa_log_info("Successfully opened device %s.", u->device_name);
1622 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1623 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1628 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1630 if (use_mmap && !b) {
1631 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1632 u->use_mmap = use_mmap = FALSE;
1635 if (use_tsched && (!b || !d)) {
1636 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1637 u->use_tsched = use_tsched = FALSE;
1641 pa_log_info("Successfully enabled mmap() mode.");
1644 pa_log_info("Successfully enabled timer-based scheduling mode.");
1646 /* ALSA might tweak the sample spec, so recalculate the frame size */
1647 frame_size = pa_frame_size(&ss);
1649 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1651 pa_source_new_data_init(&data);
1652 data.driver = driver;
1655 set_source_name(&data, ma, dev_id, u->device_name, mapping);
1656 pa_source_new_data_set_sample_spec(&data, &ss);
1657 pa_source_new_data_set_channel_map(&data, &map);
1659 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1660 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1661 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1662 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1663 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1666 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1667 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1670 pa_alsa_init_description(data.proplist);
1672 if (u->control_device)
1673 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1675 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1676 pa_log("Invalid properties");
1677 pa_source_new_data_done(&data);
1681 if (u->mixer_path_set)
1682 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1684 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1685 pa_source_new_data_done(&data);
1688 pa_log("Failed to create source object");
1692 u->source->parent.process_msg = source_process_msg;
1693 u->source->update_requested_latency = source_update_requested_latency_cb;
1694 u->source->set_state = source_set_state_cb;
1695 u->source->set_port = source_set_port_cb;
1696 u->source->userdata = u;
1698 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1699 pa_source_set_rtpoll(u->source, u->rtpoll);
1701 u->frame_size = frame_size;
1702 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1703 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1704 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1706 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1707 (double) u->hwbuf_size / (double) u->fragment_size,
1708 (long unsigned) u->fragment_size,
1709 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1710 (long unsigned) u->hwbuf_size,
1711 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1713 if (u->use_tsched) {
1714 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
1716 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
1717 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
1719 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
1720 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
1722 fix_min_sleep_wakeup(u);
1723 fix_tsched_watermark(u);
1725 pa_source_set_latency_range(u->source,
1727 pa_bytes_to_usec(u->hwbuf_size, &ss));
1729 pa_log_info("Time scheduling watermark is %0.2fms",
1730 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1732 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1736 if (update_sw_params(u) < 0)
1739 if (setup_mixer(u, ignore_dB) < 0)
1742 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1744 if (!(u->thread = pa_thread_new(thread_func, u))) {
1745 pa_log("Failed to create thread.");
1748 /* Get initial mixer settings */
1749 if (data.volume_is_set) {
1750 if (u->source->set_volume)
1751 u->source->set_volume(u->source);
1753 if (u->source->get_volume)
1754 u->source->get_volume(u->source);
1757 if (data.muted_is_set) {
1758 if (u->source->set_mute)
1759 u->source->set_mute(u->source);
1761 if (u->source->get_mute)
1762 u->source->get_mute(u->source);
1765 pa_source_put(u->source);
1768 pa_alsa_profile_set_free(profile_set);
1778 pa_alsa_profile_set_free(profile_set);
1783 static void userdata_free(struct userdata *u) {
1787 pa_source_unlink(u->source);
1790 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1791 pa_thread_free(u->thread);
1794 pa_thread_mq_done(&u->thread_mq);
1797 pa_source_unref(u->source);
1799 if (u->alsa_rtpoll_item)
1800 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1803 pa_rtpoll_free(u->rtpoll);
1805 if (u->pcm_handle) {
1806 snd_pcm_drop(u->pcm_handle);
1807 snd_pcm_close(u->pcm_handle);
1811 pa_alsa_fdlist_free(u->mixer_fdl);
1813 if (u->mixer_path_set)
1814 pa_alsa_path_set_free(u->mixer_path_set);
1815 else if (u->mixer_path)
1816 pa_alsa_path_free(u->mixer_path);
1818 if (u->mixer_handle)
1819 snd_mixer_close(u->mixer_handle);
1822 pa_smoother_free(u->smoother);
1827 pa_xfree(u->device_name);
1828 pa_xfree(u->control_device);
1832 void pa_alsa_source_free(pa_source *s) {
1835 pa_source_assert_ref(s);
1836 pa_assert_se(u = s->userdata);