/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/i18n.h>
#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/util.h>
#include <pulse/xmalloc.h>

#include <pulsecore/core.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/core-error.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"
#define ALSA_SUSPEND_ON_IDLE_TIMEOUT "0"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s -- How long after a drop out recheck if things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms -- Wakeup at least this long before the buffer runs empty */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)                       /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
98 pa_thread_mq thread_mq;
101 snd_pcm_t *pcm_handle;
103 pa_alsa_fdlist *mixer_fdl;
104 snd_mixer_t *mixer_handle;
105 pa_alsa_path_set *mixer_path_set;
106 pa_alsa_path *mixer_path;
108 pa_cvolume hardware_volume;
120 watermark_inc_threshold,
121 watermark_dec_threshold;
123 pa_usec_t watermark_dec_not_before;
125 pa_memchunk memchunk;
127 char *device_name; /* name of the PCM device */
128 char *control_device; /* name of the control device */
130 pa_bool_t use_mmap:1, use_tsched:1;
132 pa_bool_t first, after_rewind;
134 pa_rtpoll_item *alsa_rtpoll_item;
136 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
138 pa_smoother *smoother;
139 uint64_t write_count;
140 uint64_t since_start;
141 pa_usec_t smoother_interval;
142 pa_usec_t last_smoother_update;
144 pa_reserve_wrapper *reserve;
145 pa_hook_slot *reserve_slot;
146 pa_reserve_monitor_wrapper *monitor;
147 pa_hook_slot *monitor_slot;
/* Forward declaration: full teardown of a struct userdata (defined later in the file). */
static void userdata_free(struct userdata *u);
152 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
156 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
157 return PA_HOOK_CANCEL;
162 static void reserve_done(struct userdata *u) {
165 if (u->reserve_slot) {
166 pa_hook_slot_free(u->reserve_slot);
167 u->reserve_slot = NULL;
171 pa_reserve_wrapper_unref(u->reserve);
176 static void reserve_update(struct userdata *u) {
177 const char *description;
180 if (!u->sink || !u->reserve)
183 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
184 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
187 static int reserve_init(struct userdata *u, const char *dname) {
196 if (pa_in_system_mode())
199 if (!(rname = pa_alsa_get_reserve_name(dname)))
202 /* We are resuming, try to lock the device */
203 u->reserve = pa_reserve_wrapper_get(u->core, rname);
211 pa_assert(!u->reserve_slot);
212 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
217 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
223 b = PA_PTR_TO_UINT(busy) && !u->reserve;
225 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
229 static void monitor_done(struct userdata *u) {
232 if (u->monitor_slot) {
233 pa_hook_slot_free(u->monitor_slot);
234 u->monitor_slot = NULL;
238 pa_reserve_monitor_wrapper_unref(u->monitor);
243 static int reserve_monitor_init(struct userdata *u, const char *dname) {
249 if (pa_in_system_mode())
252 if (!(rname = pa_alsa_get_reserve_name(dname)))
255 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
261 pa_assert(!u->monitor_slot);
262 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
267 static void fix_min_sleep_wakeup(struct userdata *u) {
268 size_t max_use, max_use_2;
271 pa_assert(u->use_tsched);
273 max_use = u->hwbuf_size - u->hwbuf_unused;
274 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
276 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
277 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
279 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
280 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
283 static void fix_tsched_watermark(struct userdata *u) {
286 pa_assert(u->use_tsched);
288 max_use = u->hwbuf_size - u->hwbuf_unused;
290 if (u->tsched_watermark > max_use - u->min_sleep)
291 u->tsched_watermark = max_use - u->min_sleep;
293 if (u->tsched_watermark < u->min_wakeup)
294 u->tsched_watermark = u->min_wakeup;
297 static void increase_watermark(struct userdata *u) {
298 size_t old_watermark;
299 pa_usec_t old_min_latency, new_min_latency;
302 pa_assert(u->use_tsched);
304 /* First, just try to increase the watermark */
305 old_watermark = u->tsched_watermark;
306 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
307 fix_tsched_watermark(u);
309 if (old_watermark != u->tsched_watermark) {
310 pa_log_info("Increasing wakeup watermark to %0.2f ms",
311 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
315 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
316 old_min_latency = u->sink->thread_info.min_latency;
317 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
318 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
320 if (old_min_latency != new_min_latency) {
321 pa_log_info("Increasing minimal latency to %0.2f ms",
322 (double) new_min_latency / PA_USEC_PER_MSEC);
324 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
327 /* When we reach this we're officialy fucked! */
330 static void decrease_watermark(struct userdata *u) {
331 size_t old_watermark;
335 pa_assert(u->use_tsched);
337 now = pa_rtclock_now();
339 if (u->watermark_dec_not_before <= 0)
342 if (u->watermark_dec_not_before > now)
345 old_watermark = u->tsched_watermark;
347 if (u->tsched_watermark < u->watermark_dec_step)
348 u->tsched_watermark = u->tsched_watermark / 2;
350 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
352 fix_tsched_watermark(u);
354 if (old_watermark != u->tsched_watermark)
355 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
356 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
358 /* We don't change the latency range*/
361 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
364 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
367 pa_assert(sleep_usec);
368 pa_assert(process_usec);
371 pa_assert(u->use_tsched);
373 usec = pa_sink_get_requested_latency_within_thread(u->sink);
375 if (usec == (pa_usec_t) -1)
376 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
378 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
383 *sleep_usec = usec - wm;
387 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
388 (unsigned long) (usec / PA_USEC_PER_MSEC),
389 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
390 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
394 static int try_recover(struct userdata *u, const char *call, int err) {
399 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
401 pa_assert(err != -EAGAIN);
404 pa_log_debug("%s: Buffer underrun!", call);
406 if (err == -ESTRPIPE)
407 pa_log_debug("%s: System suspended!", call);
409 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
410 pa_log("%s: %s", call, pa_alsa_strerror(err));
419 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
421 pa_bool_t underrun = FALSE;
423 /* We use <= instead of < for this check here because an underrun
424 * only happens after the last sample was processed, not already when
425 * it is removed from the buffer. This is particularly important
426 * when block transfer is used. */
428 if (n_bytes <= u->hwbuf_size)
429 left_to_play = u->hwbuf_size - n_bytes;
432 /* We got a dropout. What a mess! */
440 if (!u->first && !u->after_rewind)
441 if (pa_log_ratelimit())
442 pa_log_info("Underrun!");
446 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
447 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
448 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
449 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
453 pa_bool_t reset_not_before = TRUE;
455 if (!u->first && !u->after_rewind) {
456 if (underrun || left_to_play < u->watermark_inc_threshold)
457 increase_watermark(u);
458 else if (left_to_play > u->watermark_dec_threshold) {
459 reset_not_before = FALSE;
461 /* We decrease the watermark only if have actually
462 * been woken up by a timeout. If something else woke
463 * us up it's too easy to fulfill the deadlines... */
466 decrease_watermark(u);
470 if (reset_not_before)
471 u->watermark_dec_not_before = 0;
/*
 * mmap_write(): render audio directly into the ALSA mmap'ed hw buffer.
 * Outer loop: query writable space (pa_alsa_safe_avail), decide whether to
 * fill now; inner loop: mmap_begin -> pa_sink_render_into_full -> mmap_commit.
 * On return, *sleep_usec holds how long the caller may sleep.
 *
 * NOTE(review): this chunk is extraction-garbled — every line carries a fused
 * original line number and interior lines (loop headers, braces, break/return
 * statements, #ifdef DEBUG_TIMING guards, the PA_ONCE block) are missing.
 * Code left byte-identical; restore from the upstream file before compiling.
 */
477 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
478 pa_bool_t work_done = TRUE;
479 pa_usec_t max_sleep_usec = 0, process_usec = 0;
484 pa_sink_assert_ref(u->sink);
/* hw_sleep_time() is only meaningful under timer-based scheduling;
 * presumably guarded by if (u->use_tsched) on a dropped line. */
487 hw_sleep_time(u, &max_sleep_usec, &process_usec);
493 pa_bool_t after_avail = TRUE;
495 /* First we determine how many samples are missing to fill the
496 * buffer up to 100% */
498 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
/* avail failed: try to recover (underrun/suspend) and retry, else bail */
500 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
506 n_bytes = (size_t) n * u->frame_size;
509 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
512 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
517 /* We won't fill up the playback buffer before at least
518 * half the sleep time is over because otherwise we might
519 * ask for more data from the clients then they expect. We
520 * need to guarantee that clients only have to keep around
521 * a single hw buffer length. */
524 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
526 pa_log_debug("Not filling up, because too early.");
531 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
/* Spurious POLLOUT wakeup: warn once about the broken ALSA driver */
535 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
536 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
537 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
538 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
544 pa_log_debug("Not filling up, because not necessary.");
552 pa_log_debug("Not filling up, because already too many iterations.");
558 n_bytes -= u->hwbuf_unused;
562 pa_log_debug("Filling up");
569 const snd_pcm_channel_area_t *areas;
570 snd_pcm_uframes_t offset, frames;
571 snd_pcm_sframes_t sframes;
573 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
574 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
576 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
/* EAGAIN after at least one successful write just means "buffer full" */
578 if (!after_avail && err == -EAGAIN)
581 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
587 /* Make sure that if these memblocks need to be copied they will fit into one slot */
588 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
589 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
591 if (!after_avail && frames == 0)
594 pa_assert(frames > 0);
597 /* Check these are multiples of 8 bit */
598 pa_assert((areas[0].first & 7) == 0);
599 pa_assert((areas[0].step & 7)== 0);
601 /* We assume a single interleaved memory buffer */
602 pa_assert((areas[0].first >> 3) == 0);
603 pa_assert((areas[0].step >> 3) == u->frame_size);
605 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
/* Wrap the mmap'ed region in a fixed memblock so the sink can render into it */
607 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
608 chunk.length = pa_memblock_get_length(chunk.memblock);
611 pa_sink_render_into_full(u->sink, &chunk);
612 pa_memblock_unref_fixed(chunk.memblock);
614 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
616 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
624 u->write_count += frames * u->frame_size;
625 u->since_start += frames * u->frame_size;
628 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
631 if ((size_t) frames * u->frame_size >= n_bytes)
634 n_bytes -= (size_t) frames * u->frame_size;
/* Tell the caller how long it may sleep: time covered by queued audio
 * minus the processing headroom */
638 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
640 if (*sleep_usec > process_usec)
641 *sleep_usec -= process_usec;
645 return work_done ? 1 : 0;
/*
 * unix_write(): non-mmap playback path. Renders into u->memchunk via
 * pa_sink_render() and pushes it to the device with snd_pcm_writei(),
 * looping until the hw buffer is sufficiently full. On return, *sleep_usec
 * holds how long the caller may sleep.
 *
 * NOTE(review): this chunk is extraction-garbled — every line carries a fused
 * original line number and interior lines (loop headers, braces, break/return
 * statements) are missing. Code left byte-identical; restore from the
 * upstream file before compiling.
 */
648 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
649 pa_bool_t work_done = FALSE;
650 pa_usec_t max_sleep_usec = 0, process_usec = 0;
655 pa_sink_assert_ref(u->sink);
/* hw_sleep_time() is only meaningful under timer-based scheduling;
 * presumably guarded by if (u->use_tsched) on a dropped line. */
658 hw_sleep_time(u, &max_sleep_usec, &process_usec);
664 pa_bool_t after_avail = TRUE;
666 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
668 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
674 n_bytes = (size_t) n * u->frame_size;
675 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
680 /* We won't fill up the playback buffer before at least
681 * half the sleep time is over because otherwise we might
682 * ask for more data from the clients then they expect. We
683 * need to guarantee that clients only have to keep around
684 * a single hw buffer length. */
687 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
690 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
/* Spurious POLLOUT wakeup: warn once about the broken ALSA driver */
694 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
695 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
696 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
697 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
707 pa_log_debug("Not filling up, because already too many iterations.");
713 n_bytes -= u->hwbuf_unused;
717 snd_pcm_sframes_t frames;
720 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
/* Render fresh data only when the pending chunk is exhausted */
722 if (u->memchunk.length <= 0)
723 pa_sink_render(u->sink, n_bytes, &u->memchunk);
725 pa_assert(u->memchunk.length > 0);
727 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
729 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
730 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
732 p = pa_memblock_acquire(u->memchunk.memblock);
733 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
734 pa_memblock_release(u->memchunk.memblock);
736 if (PA_UNLIKELY(frames < 0)) {
/* EAGAIN after at least one successful write just means "buffer full" */
738 if (!after_avail && (int) frames == -EAGAIN)
741 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
747 if (!after_avail && frames == 0)
750 pa_assert(frames > 0);
/* Consume the written part of the pending chunk */
753 u->memchunk.index += (size_t) frames * u->frame_size;
754 u->memchunk.length -= (size_t) frames * u->frame_size;
756 if (u->memchunk.length <= 0) {
757 pa_memblock_unref(u->memchunk.memblock);
758 pa_memchunk_reset(&u->memchunk);
763 u->write_count += frames * u->frame_size;
764 u->since_start += frames * u->frame_size;
766 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
768 if ((size_t) frames * u->frame_size >= n_bytes)
771 n_bytes -= (size_t) frames * u->frame_size;
775 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
777 if (*sleep_usec > process_usec)
778 *sleep_usec -= process_usec;
782 return work_done ? 1 : 0;
785 static void update_smoother(struct userdata *u) {
786 snd_pcm_sframes_t delay = 0;
789 pa_usec_t now1 = 0, now2;
790 snd_pcm_status_t *status;
792 snd_pcm_status_alloca(&status);
795 pa_assert(u->pcm_handle);
797 /* Let's update the time smoother */
799 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
800 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
804 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
805 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
807 snd_htimestamp_t htstamp = { 0, 0 };
808 snd_pcm_status_get_htstamp(status, &htstamp);
809 now1 = pa_timespec_load(&htstamp);
812 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
814 now1 = pa_rtclock_now();
816 /* check if the time since the last update is bigger than the interval */
817 if (u->last_smoother_update > 0)
818 if (u->last_smoother_update + u->smoother_interval > now1)
821 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
823 if (PA_UNLIKELY(position < 0))
826 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
828 pa_smoother_put(u->smoother, now1, now2);
830 u->last_smoother_update = now1;
831 /* exponentially increase the update interval up to the MAX limit */
832 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
835 static pa_usec_t sink_get_latency(struct userdata *u) {
838 pa_usec_t now1, now2;
842 now1 = pa_rtclock_now();
843 now2 = pa_smoother_get(u->smoother, now1);
845 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
847 r = delay >= 0 ? (pa_usec_t) delay : 0;
849 if (u->memchunk.memblock)
850 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
855 static int build_pollfd(struct userdata *u) {
857 pa_assert(u->pcm_handle);
859 if (u->alsa_rtpoll_item)
860 pa_rtpoll_item_free(u->alsa_rtpoll_item);
862 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
868 /* Called from IO context */
869 static int suspend(struct userdata *u) {
871 pa_assert(u->pcm_handle);
873 pa_smoother_pause(u->smoother, pa_rtclock_now());
875 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
876 * take awfully long with our long buffer sizes today. */
877 snd_pcm_close(u->pcm_handle);
878 u->pcm_handle = NULL;
880 if (u->alsa_rtpoll_item) {
881 pa_rtpoll_item_free(u->alsa_rtpoll_item);
882 u->alsa_rtpoll_item = NULL;
885 pa_log_info("Device suspended...");
890 /* Called from IO context */
891 static int update_sw_params(struct userdata *u) {
892 snd_pcm_uframes_t avail_min;
897 /* Use the full buffer if noone asked us for anything specific */
903 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
906 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
908 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
910 /* We need at least one sample in our buffer */
912 if (PA_UNLIKELY(b < u->frame_size))
915 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
918 fix_min_sleep_wakeup(u);
919 fix_tsched_watermark(u);
922 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
924 /* We need at last one frame in the used part of the buffer */
925 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
928 pa_usec_t sleep_usec, process_usec;
930 hw_sleep_time(u, &sleep_usec, &process_usec);
931 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
934 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
936 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
937 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
941 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
946 /* Called from IO context */
947 static int unsuspend(struct userdata *u) {
951 snd_pcm_uframes_t period_size, buffer_size;
954 pa_assert(!u->pcm_handle);
956 pa_log_info("Trying resume...");
958 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
960 SND_PCM_NO_AUTO_RESAMPLE|
961 SND_PCM_NO_AUTO_CHANNELS|
962 SND_PCM_NO_AUTO_FORMAT)) < 0) {
963 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
967 ss = u->sink->sample_spec;
968 period_size = u->fragment_size / u->frame_size;
969 buffer_size = u->hwbuf_size / u->frame_size;
973 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
974 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
978 if (b != u->use_mmap || d != u->use_tsched) {
979 pa_log_warn("Resume failed, couldn't get original access mode.");
983 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
984 pa_log_warn("Resume failed, couldn't restore original sample settings.");
988 if (period_size*u->frame_size != u->fragment_size ||
989 buffer_size*u->frame_size != u->hwbuf_size) {
990 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
991 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
992 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
996 if (update_sw_params(u) < 0)
999 if (build_pollfd(u) < 0)
1003 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1004 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1005 u->last_smoother_update = 0;
1010 pa_log_info("Resumed successfully...");
1015 if (u->pcm_handle) {
1016 snd_pcm_close(u->pcm_handle);
1017 u->pcm_handle = NULL;
1023 /* Called from IO context */
1024 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1025 struct userdata *u = PA_SINK(o)->userdata;
1029 case PA_SINK_MESSAGE_GET_LATENCY: {
1033 r = sink_get_latency(u);
1035 *((pa_usec_t*) data) = r;
1040 case PA_SINK_MESSAGE_SET_STATE:
1042 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1044 case PA_SINK_SUSPENDED: {
1047 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1049 if ((r = suspend(u)) < 0)
1056 case PA_SINK_RUNNING: {
1059 if (u->sink->thread_info.state == PA_SINK_INIT) {
1060 if (build_pollfd(u) < 0)
1064 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1065 if ((r = unsuspend(u)) < 0)
1072 case PA_SINK_UNLINKED:
1074 case PA_SINK_INVALID_STATE:
1081 return pa_sink_process_msg(o, code, data, offset, chunk);
1084 /* Called from main context */
1085 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1086 pa_sink_state_t old_state;
1089 pa_sink_assert_ref(s);
1090 pa_assert_se(u = s->userdata);
1092 old_state = pa_sink_get_state(u->sink);
1094 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1096 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1097 if (reserve_init(u, u->device_name) < 0)
1098 return -PA_ERR_BUSY;
1103 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1104 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1107 pa_assert(u->mixer_handle);
1109 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1112 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1113 pa_sink_get_volume(u->sink, TRUE);
1114 pa_sink_get_mute(u->sink, TRUE);
1120 static void sink_get_volume_cb(pa_sink *s) {
1121 struct userdata *u = s->userdata;
1123 char t[PA_CVOLUME_SNPRINT_MAX];
1126 pa_assert(u->mixer_path);
1127 pa_assert(u->mixer_handle);
1129 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1132 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1133 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1135 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1137 if (pa_cvolume_equal(&u->hardware_volume, &r))
1140 s->real_volume = u->hardware_volume = r;
1142 /* Hmm, so the hardware volume changed, let's reset our software volume */
1143 if (u->mixer_path->has_dB)
1144 pa_sink_set_soft_volume(s, NULL);
1147 static void sink_set_volume_cb(pa_sink *s) {
1148 struct userdata *u = s->userdata;
1150 char t[PA_CVOLUME_SNPRINT_MAX];
1153 pa_assert(u->mixer_path);
1154 pa_assert(u->mixer_handle);
1156 /* Shift up by the base volume */
1157 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1159 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1162 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1163 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1165 u->hardware_volume = r;
1167 if (u->mixer_path->has_dB) {
1168 pa_cvolume new_soft_volume;
1169 pa_bool_t accurate_enough;
1171 /* Match exactly what the user requested by software */
1172 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1174 /* If the adjustment to do in software is only minimal we
1175 * can skip it. That saves us CPU at the expense of a bit of
1178 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1179 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1181 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->real_volume));
1182 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1183 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
1184 pa_yes_no(accurate_enough));
1186 if (!accurate_enough)
1187 s->soft_volume = new_soft_volume;
1190 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1192 /* We can't match exactly what the user requested, hence let's
1193 * at least tell the user about it */
1199 static void sink_get_mute_cb(pa_sink *s) {
1200 struct userdata *u = s->userdata;
1204 pa_assert(u->mixer_path);
1205 pa_assert(u->mixer_handle);
1207 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1213 static void sink_set_mute_cb(pa_sink *s) {
1214 struct userdata *u = s->userdata;
1217 pa_assert(u->mixer_path);
1218 pa_assert(u->mixer_handle);
1220 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1223 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1224 struct userdata *u = s->userdata;
1225 pa_alsa_port_data *data;
1229 pa_assert(u->mixer_handle);
1231 data = PA_DEVICE_PORT_DATA(p);
1233 pa_assert_se(u->mixer_path = data->path);
1234 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1236 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1237 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1238 s->n_volume_steps = PA_VOLUME_NORM+1;
1240 if (u->mixer_path->max_dB > 0.0)
1241 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1243 pa_log_info("No particular base volume set, fixing to 0 dB");
1245 s->base_volume = PA_VOLUME_NORM;
1246 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1250 pa_alsa_setting_select(data->setting, u->mixer_handle);
1260 static void sink_update_requested_latency_cb(pa_sink *s) {
1261 struct userdata *u = s->userdata;
1268 before = u->hwbuf_unused;
1269 update_sw_params(u);
1271 /* Let's check whether we now use only a smaller part of the
1272 buffer then before. If so, we need to make sure that subsequent
1273 rewinds are relative to the new maximum fill level and not to the
1274 current fill level. Thus, let's do a full rewind once, to clear
1277 if (u->hwbuf_unused > before) {
1278 pa_log_debug("Requesting rewind due to latency change.");
1279 pa_sink_request_rewind(s, (size_t) -1);
/*
 * process_rewind(): honor a rewind request from the sink. Takes back as much
 * already-submitted audio as possible: first from our pending memchunk, then
 * from the hardware buffer via snd_pcm_rewind(), limited so that at least
 * the watermark worth of data stays queued.
 *
 * NOTE(review): this chunk is extraction-garbled — every line carries a fused
 * original line number and the branch structure enclosing the two rewind
 * sequences below (memchunk path vs. hardware path) is missing, so it cannot
 * be reconstructed safely here. Code left byte-identical; restore from the
 * upstream file before compiling.
 */
1283 static int process_rewind(struct userdata *u) {
1284 snd_pcm_sframes_t unused;
1285 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1289 /* Figure out how much we shall rewind and reset the counter */
1290 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1291 u->sink->thread_info.rewind_nbytes = 0;
1293 if (rewind_nbytes > 0) {
1294 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
/* Take back only what still sits in our own pending chunk */
1295 rewind_nbytes = PA_MIN(u->memchunk.length, rewind_nbytes);
1296 u->memchunk.length -= rewind_nbytes;
1297 if (u->memchunk.length <= 0 && u->memchunk.memblock) {
1298 pa_memblock_unref(u->memchunk.memblock);
1299 pa_memchunk_reset(&u->memchunk);
1301 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1304 u->write_count -= rewind_nbytes;
1305 pa_sink_process_rewind(u->sink, rewind_nbytes);
1307 u->after_rewind = TRUE;
1312 /* Figure out how much we shall rewind and reset the counter */
1313 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1315 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1317 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1318 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
/* Never rewind past the watermark: keep enough queued to avoid underrun */
1322 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1324 if (u->hwbuf_size > unused_nbytes)
1325 limit_nbytes = u->hwbuf_size - unused_nbytes;
1329 if (rewind_nbytes > limit_nbytes)
1330 rewind_nbytes = limit_nbytes;
1332 if (rewind_nbytes > 0) {
1333 snd_pcm_sframes_t in_frames, out_frames;
1335 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1337 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1338 pa_log_debug("before: %lu", (unsigned long) in_frames);
1339 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1340 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1341 if (try_recover(u, "process_rewind", out_frames) < 0)
1346 pa_log_debug("after: %lu", (unsigned long) out_frames);
/* The device may have rewound fewer frames than we asked for */
1348 rewind_nbytes = (size_t) out_frames * u->frame_size;
1350 if (rewind_nbytes <= 0)
1351 pa_log_info("Tried rewind, but was apparently not possible.");
1353 u->write_count -= rewind_nbytes;
1354 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1355 pa_sink_process_rewind(u->sink, rewind_nbytes);
1357 u->after_rewind = TRUE;
1361 pa_log_debug("Mhmm, actually there is nothing to rewind.");
/* Still must be reported to the core, even when nothing was rewound */
1363 pa_sink_process_rewind(u->sink, 0);
1369 static void thread_func(void *userdata) {
1370 struct userdata *u = userdata;
1371 unsigned short revents = 0;
1375 pa_log_debug("Thread starting up");
1377 if (u->core->realtime_scheduling)
1378 pa_make_realtime(u->core->realtime_priority);
1380 pa_thread_mq_install(&u->thread_mq);
1386 pa_log_debug("Loop");
1389 /* Render some data and write it to the dsp */
1390 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1392 pa_usec_t sleep_usec = 0;
1393 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1395 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1396 if (process_rewind(u) < 0)
1400 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1402 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1407 /* pa_log_debug("work_done = %i", work_done); */
1412 pa_log_info("Starting playback.");
1413 snd_pcm_start(u->pcm_handle);
1415 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1421 if (u->use_tsched) {
1424 if (u->since_start <= u->hwbuf_size) {
1426 /* USB devices on ALSA seem to hit a buffer
1427 * underrun during the first iterations much
1428 * quicker then we calculate here, probably due to
1429 * the transport latency. To accommodate for that
1430 * we artificially decrease the sleep time until
1431 * we have filled the buffer at least once
1434 if (pa_log_ratelimit())
1435 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1439 /* OK, the playback buffer is now full, let's
1440 * calculate when to wake up next */
1441 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1443 /* Convert from the sound card time domain to the
1444 * system time domain */
1445 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1447 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1449 /* We don't trust the conversion, so we wake up whatever comes first */
1450 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
1454 u->after_rewind = FALSE;
1456 } else if (u->use_tsched)
1458 /* OK, we're in an invalid state, let's disable our timers */
1459 pa_rtpoll_set_timer_disabled(u->rtpoll);
1461 /* Hmm, nothing to do. Let's sleep */
1462 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1468 /* Tell ALSA about this and process its response */
1469 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1470 struct pollfd *pollfd;
1474 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1476 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1477 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1481 if (revents & ~POLLOUT) {
1482 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1487 } else if (revents && u->use_tsched && pa_log_ratelimit())
1488 pa_log_debug("Wakeup from ALSA!");
1495 /* If this was no regular exit from the loop we have to continue
1496 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1497 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1498 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1501 pa_log_debug("Thread shutting down");
1504 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1510 pa_assert(device_name);
1512 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1513 pa_sink_new_data_set_name(data, n);
1514 data->namereg_fail = TRUE;
1518 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1519 data->namereg_fail = TRUE;
1521 n = device_id ? device_id : device_name;
1522 data->namereg_fail = FALSE;
1526 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1528 t = pa_sprintf_malloc("alsa_output.%s", n);
1530 pa_sink_new_data_set_name(data, t);
1534 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1536 if (!mapping && !element)
1539 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1540 pa_log_info("Failed to find a working mixer device.");
1546 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1549 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1552 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1553 pa_alsa_path_dump(u->mixer_path);
1556 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1559 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1561 pa_log_debug("Probed mixer paths:");
1562 pa_alsa_path_set_dump(u->mixer_path_set);
1569 if (u->mixer_path_set) {
1570 pa_alsa_path_set_free(u->mixer_path_set);
1571 u->mixer_path_set = NULL;
1572 } else if (u->mixer_path) {
1573 pa_alsa_path_free(u->mixer_path);
1574 u->mixer_path = NULL;
1577 if (u->mixer_handle) {
1578 snd_mixer_close(u->mixer_handle);
1579 u->mixer_handle = NULL;
1583 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1586 if (!u->mixer_handle)
1589 if (u->sink->active_port) {
1590 pa_alsa_port_data *data;
1592 /* We have a list of supported paths, so let's activate the
1593 * one that has been chosen as active */
1595 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1596 u->mixer_path = data->path;
1598 pa_alsa_path_select(data->path, u->mixer_handle);
1601 pa_alsa_setting_select(data->setting, u->mixer_handle);
1605 if (!u->mixer_path && u->mixer_path_set)
1606 u->mixer_path = u->mixer_path_set->paths;
1608 if (u->mixer_path) {
1609 /* Hmm, we have only a single path, then let's activate it */
1611 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1613 if (u->mixer_path->settings)
1614 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1619 if (!u->mixer_path->has_volume)
1620 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1623 if (u->mixer_path->has_dB) {
1624 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1626 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1627 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1629 if (u->mixer_path->max_dB > 0.0)
1630 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1632 pa_log_info("No particular base volume set, fixing to 0 dB");
1635 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1636 u->sink->base_volume = PA_VOLUME_NORM;
1637 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1640 u->sink->get_volume = sink_get_volume_cb;
1641 u->sink->set_volume = sink_set_volume_cb;
1643 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
1644 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1647 if (!u->mixer_path->has_mute) {
1648 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1650 u->sink->get_mute = sink_get_mute_cb;
1651 u->sink->set_mute = sink_set_mute_cb;
1652 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1653 pa_log_info("Using hardware mute control.");
1656 u->mixer_fdl = pa_alsa_fdlist_new();
1658 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1659 pa_log("Failed to initialize file descriptor monitoring");
1663 if (u->mixer_path_set)
1664 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1666 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1671 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1673 struct userdata *u = NULL;
1674 const char *dev_id = NULL;
1675 pa_sample_spec ss, requested_ss;
1677 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1678 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1680 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1681 pa_sink_new_data data;
1682 pa_alsa_profile_set *profile_set = NULL;
1687 ss = m->core->default_sample_spec;
1688 map = m->core->default_channel_map;
1689 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1690 pa_log("Failed to parse sample specification and channel map");
1695 frame_size = pa_frame_size(&ss);
1697 nfrags = m->core->default_n_fragments;
1698 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1700 frag_size = (uint32_t) frame_size;
1701 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1702 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1704 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1705 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1706 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1707 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1708 pa_log("Failed to parse buffer metrics");
1712 buffer_size = nfrags * frag_size;
1714 period_frames = frag_size/frame_size;
1715 buffer_frames = buffer_size/frame_size;
1716 tsched_frames = tsched_size/frame_size;
1718 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1719 pa_log("Failed to parse mmap argument.");
1723 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1724 pa_log("Failed to parse tsched argument.");
1728 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1729 pa_log("Failed to parse ignore_dB argument.");
1733 use_tsched = pa_alsa_may_tsched(use_tsched);
1735 u = pa_xnew0(struct userdata, 1);
1738 u->use_mmap = use_mmap;
1739 u->use_tsched = use_tsched;
1741 u->rtpoll = pa_rtpoll_new();
1742 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1744 u->smoother = pa_smoother_new(
1745 DEFAULT_TSCHED_BUFFER_USEC*2,
1746 DEFAULT_TSCHED_BUFFER_USEC*2,
1752 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1754 dev_id = pa_modargs_get_value(
1756 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1758 if (reserve_init(u, dev_id) < 0)
1761 if (reserve_monitor_init(u, dev_id) < 0)
1769 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1770 pa_log("device_id= not set");
1774 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1778 SND_PCM_STREAM_PLAYBACK,
1779 &period_frames, &buffer_frames, tsched_frames,
1784 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1786 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1789 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1793 SND_PCM_STREAM_PLAYBACK,
1794 &period_frames, &buffer_frames, tsched_frames,
1795 &b, &d, profile_set, &mapping)))
1801 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1802 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1805 SND_PCM_STREAM_PLAYBACK,
1806 &period_frames, &buffer_frames, tsched_frames,
1811 pa_assert(u->device_name);
1812 pa_log_info("Successfully opened device %s.", u->device_name);
1814 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1815 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1820 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1822 if (use_mmap && !b) {
1823 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1824 u->use_mmap = use_mmap = FALSE;
1827 if (use_tsched && (!b || !d)) {
1828 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1829 u->use_tsched = use_tsched = FALSE;
1833 pa_log_info("Successfully enabled mmap() mode.");
1836 pa_log_info("Successfully enabled timer-based scheduling mode.");
1838 /* ALSA might tweak the sample spec, so recalculate the frame size */
1839 frame_size = pa_frame_size(&ss);
1841 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1843 pa_sink_new_data_init(&data);
1844 data.driver = driver;
1847 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
1848 pa_sink_new_data_set_sample_spec(&data, &ss);
1849 pa_sink_new_data_set_channel_map(&data, &map);
1851 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1852 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1853 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1854 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1855 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1857 /* Set Suspend timeout to ZERO to avoid noise */
1858 pa_log_info("Set suspend-on-idle timeout to ZERO to avoid noise");
1859 pa_proplist_sets(data.proplist, "module-suspend-on-idle.timeout", ALSA_SUSPEND_ON_IDLE_TIMEOUT);
1862 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1863 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1866 pa_alsa_init_description(data.proplist);
1868 if (u->control_device)
1869 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1871 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1872 pa_log("Invalid properties");
1873 pa_sink_new_data_done(&data);
1877 if (u->mixer_path_set)
1878 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1880 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
1881 pa_sink_new_data_done(&data);
1884 pa_log("Failed to create sink object");
1888 u->sink->parent.process_msg = sink_process_msg;
1889 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1890 u->sink->set_state = sink_set_state_cb;
1891 u->sink->set_port = sink_set_port_cb;
1892 u->sink->userdata = u;
1894 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1895 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1897 u->frame_size = frame_size;
1898 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1899 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1900 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1902 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1903 (double) u->hwbuf_size / (double) u->fragment_size,
1904 (long unsigned) u->fragment_size,
1905 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1906 (long unsigned) u->hwbuf_size,
1907 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1909 pa_sink_set_max_request(u->sink, u->hwbuf_size);
1910 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
1912 if (u->use_tsched) {
1913 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
1915 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1916 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1918 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1919 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1921 fix_min_sleep_wakeup(u);
1922 fix_tsched_watermark(u);
1924 pa_sink_set_latency_range(u->sink,
1926 pa_bytes_to_usec(u->hwbuf_size, &ss));
1928 pa_log_info("Time scheduling watermark is %0.2fms",
1929 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1931 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
1936 if (update_sw_params(u) < 0)
1939 if (setup_mixer(u, ignore_dB) < 0)
1942 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1944 if (!(u->thread = pa_thread_new(thread_func, u))) {
1945 pa_log("Failed to create thread.");
1949 /* Get initial mixer settings */
1950 if (data.volume_is_set) {
1951 if (u->sink->set_volume)
1952 u->sink->set_volume(u->sink);
1954 if (u->sink->get_volume)
1955 u->sink->get_volume(u->sink);
1958 if (data.muted_is_set) {
1959 if (u->sink->set_mute)
1960 u->sink->set_mute(u->sink);
1962 if (u->sink->get_mute)
1963 u->sink->get_mute(u->sink);
1966 pa_sink_put(u->sink);
1969 pa_alsa_profile_set_free(profile_set);
1979 pa_alsa_profile_set_free(profile_set);
1984 static void userdata_free(struct userdata *u) {
1988 pa_sink_unlink(u->sink);
1991 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1992 pa_thread_free(u->thread);
1995 pa_thread_mq_done(&u->thread_mq);
1998 pa_sink_unref(u->sink);
2000 if (u->memchunk.memblock)
2001 pa_memblock_unref(u->memchunk.memblock);
2003 if (u->alsa_rtpoll_item)
2004 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2007 pa_rtpoll_free(u->rtpoll);
2009 if (u->pcm_handle) {
2010 snd_pcm_drop(u->pcm_handle);
2011 snd_pcm_close(u->pcm_handle);
2015 pa_alsa_fdlist_free(u->mixer_fdl);
2017 if (u->mixer_path_set)
2018 pa_alsa_path_set_free(u->mixer_path_set);
2019 else if (u->mixer_path)
2020 pa_alsa_path_free(u->mixer_path);
2022 if (u->mixer_handle)
2023 snd_mixer_close(u->mixer_handle);
2026 pa_smoother_free(u->smoother);
2031 pa_xfree(u->device_name);
2032 pa_xfree(u->control_device);
2036 void pa_alsa_sink_free(pa_sink *s) {
2039 pa_sink_assert_ref(s);
2040 pa_assert_se(u = s->userdata);