/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
***/
#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif
#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>
#include <pulse/internal.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s    -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms  -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms   -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s   -- How long after a dropout to recheck whether things are good again */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms   -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold within the verification time, decrease the watermark */
/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */
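
/* A worked example with illustrative numbers (no particular device implied):
 * for S16LE stereo at 44.1 kHz a frame is 4 bytes, so the 2s buffer above
 * corresponds to 2 * 44100 * 4 = 352800 bytes and the 20ms watermark to about
 * 0.02 * 44100 * 4 = 3528 bytes. With timer-based scheduling we then try to
 * wake up only when roughly a watermark's worth of audio is left, rather than
 * once per hardware period. */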
#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms  -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms   -- Wake up at least this long before the buffer runs empty */
#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                  /* 10s   -- smoother window size */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC)                   /* 1s    -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms   -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* Don't require volume adjustments to be perfectly correct; don't apply additional software-volume granularity unless the difference exceeds this level */
#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz; we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330)  /* 1.33ms; depending on channels/rate/sample format this may translate to more than the 256 bytes above */
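
/* The arithmetic behind the two safeguard values: at 48 kHz with 4-byte
 * frames (S16LE stereo), 256 bytes are 64 frames, i.e. 64/48000 s ~= 1.33 ms.
 * For sample specs with more channels, wider samples or higher rates the
 * microsecond variant translates to more bytes, so whichever of the two is
 * larger ends up being the effective safeguard. */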
#define DEFAULT_WRITE_ITERATION_THRESHOLD 0.03 /* don't iterate write if < 3% of the buffer is available */
struct userdata {
    pa_thread_mq thread_mq;

    snd_pcm_t *pcm_handle;

    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    size_t
        tsched_watermark_ref,
        watermark_inc_threshold,
        watermark_dec_threshold;

    snd_pcm_uframes_t frames_per_block;

    pa_usec_t watermark_dec_not_before;
    pa_usec_t min_latency_ref;
    pa_usec_t tsched_watermark_usec;

    pa_memchunk memchunk;

    char *device_name;    /* name of the PCM device */
    char *control_device; /* name of the control device */

    bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    bool first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    pa_alsa_ucm_mapping_context *ucm_context;
};

enum {
    SINK_MESSAGE_SYNC_MIXER = PA_SINK_MESSAGE_MAX
};
static void userdata_free(struct userdata *u);

/* FIXME: Is there a better way to do this than device names? */
static bool is_iec958(struct userdata *u) {
    return (strncmp("iec958", u->device_name, 6) == 0);
}

static bool is_hdmi(struct userdata *u) {
    return (strncmp("hdmi", u->device_name, 4) == 0);
}
static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_log_debug("Suspending sink %s, because another application requested us to release the device.", u->sink->name);

    if (pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}
static void reserve_done(struct userdata *u) {
    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}
static void reserve_update(struct userdata *u) {
    const char *description;

    if (!u->sink || !u->reserve)
        return;

    if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}
static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!u->reserve)
        return -1;

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}
static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void *busy, struct userdata *u) {
    if (PA_PTR_TO_UINT(busy) && !u->reserve) {
        pa_log_debug("Suspending sink %s, because another application is blocking the access to the device.", u->sink->name);
        pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION);
    } else {
        pa_log_debug("Resuming sink %s, because other applications aren't blocking access to the device any more.", u->sink->name);
        pa_sink_suspend(u->sink, false, PA_SUSPEND_APPLICATION);
    }

    return PA_HOOK_OK;
}
static void monitor_done(struct userdata *u) {
    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;
    }

    if (u->monitor) {
        pa_reserve_monitor_wrapper_unref(u->monitor);
        u->monitor = NULL;
    }
}
static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!u->monitor)
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}
static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}
static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;

    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;

    u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
}
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
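
    /* The next line grows the watermark by whichever is smaller: doubling it,
     * which dominates while the watermark is still below the increment step,
     * or adding the fixed increment step, which dominates afterwards. */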
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency, unless doing so was disabled in
       configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
        return;
    }

    /* When we reach this we're officially fucked! */
}
static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    if (u->watermark_dec_not_before <= 0)
        goto restart;

    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;
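
    /* Mirror image of increase_watermark(): shrink by the fixed decrement
     * step, but never by more than half of the current value, so repeated
     * decreases taper off instead of collapsing the watermark to zero. */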
    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u->use_tsched);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = u->tsched_watermark_usec;

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;
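
    /* In other words, the requested latency is split in two: we sleep until
     * only a watermark's worth of audio is left in the buffer, and the
     * watermark itself is the time budget for rendering and writing the next
     * chunk. */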
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
}
static int try_recover(struct userdata *u, const char *call, int err) {
    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    return 0;
}
static size_t check_left_to_play(struct userdata *u, size_t n_bytes, bool on_timeout) {
    size_t left_to_play;
    bool underrun = false;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not when it is
     * merely removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = true;

        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit(PA_LOG_INFO))
                pa_log_info("Underrun!");
    }

    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
    if (u->use_tsched) {
        bool reset_not_before = true;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = false;

                /* We decrease the watermark only if we have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    bool work_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play, input_underrun;
    unsigned j = 0;

    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

        pa_log_debug("avail: %lu", (unsigned long) n_bytes);

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        /* We won't fill up the playback buffer before at least
         * half the sleep time is over because otherwise we might
         * ask for more data from the clients than they expect. We
         * need to guarantee that clients only have to keep around
         * a single hw buffer length. */

        if (!polled &&
            pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
            pa_log_debug("Not filling up, because too early.");
            break;
        }
        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled) {
                char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write.\n"
                         "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                         "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                       pa_strnull(dn));
                pa_xfree(dn);
            }

            pa_log_debug("Not filling up, because not necessary.");
            break;
        }

        j++;

        if (j > 10) {
            pa_log_debug("Not filling up, because already too many iterations.");
            break;

        } else if (j >= 2 && (n_bytes < (DEFAULT_WRITE_ITERATION_THRESHOLD * (u->hwbuf_size - u->hwbuf_unused)))) {
            pa_log_debug("Not filling up, because <%g%% available.", DEFAULT_WRITE_ITERATION_THRESHOLD * 100);
            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = false;

        pa_log_debug("Filling up");
        for (;;) {
            pa_memchunk chunk;
            void *p;
            size_t written;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            frames = PA_MIN(frames, u->frames_per_block);

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = false;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
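
            /* This is the zero-copy path: the mmap'ed hardware buffer region
             * is wrapped in a fixed-size memblock below, so
             * pa_sink_render_into_full() mixes straight into the DMA buffer
             * without an intermediate copy. */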
            written = frames * u->frame_size;
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, written, true);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = true;

            u->write_count += written;
            u->since_start += written;

            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) written, (unsigned long) n_bytes);
            if (written >= n_bytes)
                break;
        }
    }

    input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);

    if (u->use_tsched) {
        pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);

        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;

        *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
    }

    return work_done ? 1 : 0;
}
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    bool work_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play, input_underrun;
    unsigned j = 0;

    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

        pa_log_debug("avail: %lu", (unsigned long) n_bytes);

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        /* We won't fill up the playback buffer before at least
         * half the sleep time is over because otherwise we might
         * ask for more data from the clients than they expect. We
         * need to guarantee that clients only have to keep around
         * a single hw buffer length. */

        if (!polled &&
            pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
            break;
        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled) {
                char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                         "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                         "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                       pa_strnull(dn));
                pa_xfree(dn);
            }

            break;
        }

        j++;

        if (j > 10) {
            pa_log_debug("Not filling up, because already too many iterations.");
            break;

        } else if (j >= 2 && (n_bytes < (DEFAULT_WRITE_ITERATION_THRESHOLD * (u->hwbuf_size - u->hwbuf_unused)))) {
            pa_log_debug("Not filling up, because <%g%% available.", DEFAULT_WRITE_ITERATION_THRESHOLD * 100);
            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = false;
        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;
            size_t written;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = false;
            written = frames * u->frame_size;
            u->memchunk.index += written;
            u->memchunk.length -= written;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = true;

            u->write_count += written;
            u->since_start += written;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (written >= n_bytes)
                break;
        }
    }
    input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);

    if (u->use_tsched) {
        pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);

        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;

        *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
    }

    return work_done ? 1 : 0;
}
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;
    snd_htimestamp_t htstamp = { 0, 0 };

    snd_pcm_status_alloca(&status);

    pa_assert(u->pcm_handle);
    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->sink->sample_spec, false)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    snd_pcm_status_get_htstamp(status, &htstamp);
    now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
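
    /* now1 is system time, now2 is the playback position expressed as time;
     * feeding such pairs to the smoother below is what later allows
     * translating between the system clock and the sound card clock
     * domains. */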
    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
static int64_t sink_get_latency(struct userdata *u) {
    int64_t delay;
    pa_usec_t now1, now2;

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    if (u->memchunk.memblock)
        delay += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
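
    /* So the reported latency is "bytes handed to ALSA" minus "bytes the
     * smoother estimates as already played", both expressed as time, plus
     * whatever is still queued up locally in the memchunk. */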
932 static int build_pollfd(struct userdata *u) {
934 pa_assert(u->pcm_handle);
936 if (u->alsa_rtpoll_item)
937 pa_rtpoll_item_free(u->alsa_rtpoll_item);
939 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
/* Called from IO context */
static void suspend(struct userdata *u) {
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take an awfully long time with today's long buffer sizes. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that, while we
     * are suspended, the old max_request/max_rewind values set before
     * the suspend can't influence the per-stream buffers of newly
     * created streams. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");
}
/* Called from IO context */
static int update_sw_params(struct userdata *u, bool may_need_rewind) {
    size_t old_unused;
    snd_pcm_uframes_t avail_min;
    pa_usec_t latency;
    int err;

    /* Use the full buffer if no one asked us for anything specific */
    old_unused = u->hwbuf_unused;
    u->hwbuf_unused = 0;

    if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
        size_t b;

        pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

        b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

        /* We need at least one sample in our buffer */

        if (PA_UNLIKELY(b < u->frame_size))
            b = u->frame_size;

        u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
    }

    if (u->use_tsched) {
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }
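
    /* With timer-based scheduling avail_min is pushed out by the expected
     * sleep time on purpose: the rtpoll timer is the primary wakeup source,
     * and the PCM device's own poll wakeup should only fire as a safety
     * net. */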
    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }
    /* If we're lowering the latency, we need to do a rewind, because otherwise
     * we might end up in a situation where the hw buffer contains more data
     * than the new configured latency. The rewind has to be requested before
     * updating max_rewind, because the rewind amount is limited to max_rewind.
     *
     * If may_need_rewind is false, it means that we're just starting playback,
     * and rewinding is never needed in that situation. */
    if (may_need_rewind && u->hwbuf_unused > old_unused) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(u->sink, (size_t) -1);
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}
/* Called from IO context on unsuspend, or from the main thread when creating the sink */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            bool in_thread) {
    u->tsched_watermark = pa_convert_size(tsched_watermark, ss, &u->sink->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_sink_set_latency_range_within_thread(u->sink,
                                                u->min_latency_ref,
                                                pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, ss));

        /* Work around an assert in pa_sink_set_latency_range_within_thread():
           keep track of min_latency and reuse it when
           this routine is called from the IO context */
        u->min_latency_ref = u->sink->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
}
/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    bool b, d;
    snd_pcm_uframes_t period_size, buffer_size;
    char *device_name = NULL;

    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
        /* Need to open device in NONAUDIO mode */
        int len = strlen(u->device_name) + 8;

        device_name = pa_xmalloc(len);
        pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
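
        /* "AES0=6" sets byte 0 of the IEC958 channel status to
         * IEC958_AES0_NONAUDIO (0x02) | IEC958_AES0_CON_NOT_COPYRIGHT (0x04),
         * i.e. it marks the stream as non-PCM data so the receiver doesn't
         * try to play it back as plain audio samples. */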
    }

    if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, true)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }
    if (update_sw_params(u, false) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    /* reset the watermark to the value defined when the sink was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, true);

    pa_log_info("Resumed successfully...");

    pa_xfree(device_name);
    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    pa_xfree(device_name);
    return -1;
}
/* Called from the IO thread or the main thread depending on whether deferred
 * volume is enabled or not (with deferred volume all mixer handling is done
 * from the IO thread).
 *
 * Sets the mixer settings to match the current sink and port state (the port
 * is given as an argument, because active_port may still point to the old
 * port, if we're switching ports). */
static void sync_mixer(struct userdata *u, pa_device_port *port) {
    pa_alsa_setting *setting = NULL;

    if (!u->mixer_path)
        return;

    /* port may be NULL, because if we use a synthesized mixer path, then the
     * sink has no ports. */
    if (port) {
        pa_alsa_port_data *data;

        data = PA_DEVICE_PORT_DATA(port);
        setting = data->setting;
    }

    pa_alsa_path_select(u->mixer_path, setting, u->mixer_handle, u->sink->muted);

    if (u->sink->set_mute)
        u->sink->set_mute(u->sink);
    if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
        if (u->sink->write_volume)
            u->sink->write_volume(u->sink);
    } else {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    }
}
/* Called from IO context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            int64_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((int64_t*) data) = r;

            return 0;
        }

        case SINK_MESSAGE_SYNC_MIXER: {
            pa_device_port *port = data;

            sync_mixer(u, port);
            return 0;
        }
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* Called from main context */
static int sink_set_state_in_main_thread_cb(pa_sink *s, pa_sink_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
    pa_sink_state_t old_state;
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    /* When our session becomes active, we need to sync the mixer, because
     * another user may have changed the mixer settings.
     *
     * If deferred volume is enabled, the syncing is done in the
     * set_state_in_io_thread() callback instead. */
    if (!(s->flags & PA_SINK_DEFERRED_VOLUME)
        && (s->suspend_cause & PA_SUSPEND_SESSION)
        && !(new_suspend_cause & PA_SUSPEND_SESSION))
        sync_mixer(u, s->active_port);

    old_state = pa_sink_get_state(u->sink);

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;

    return 0;
}
/* Called from the IO thread. */
static int sink_set_state_in_io_thread_cb(pa_sink *s, pa_sink_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
    struct userdata *u;

    pa_assert_se(u = s->userdata);

    /* When our session becomes active, we need to sync the mixer, because
     * another user may have changed the mixer settings.
     *
     * If deferred volume is disabled, the syncing is done in the
     * set_state_in_main_thread() callback instead. */
    if ((s->flags & PA_SINK_DEFERRED_VOLUME)
        && (s->suspend_cause & PA_SUSPEND_SESSION)
        && !(new_suspend_cause & PA_SUSPEND_SESSION))
        sync_mixer(u, s->active_port);

    /* It may be that only the suspend cause is changing, in which case there's
     * nothing more to do. */
    if (new_state == s->thread_info.state)
        return 0;

    switch (new_state) {

        case PA_SINK_SUSPENDED: {
            pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

            suspend(u);

            break;
        }

        case PA_SINK_IDLE:
        case PA_SINK_RUNNING: {
            int r;

            if (u->sink->thread_info.state == PA_SINK_INIT) {
                if (build_pollfd(u) < 0)
                    /* FIXME: This will cause an assertion failure, because
                     * with the current design pa_sink_put() is not allowed
                     * to fail and pa_sink_put() has no fallback code that
                     * would start the sink suspended if opening the device
                     * fails. */
                    return -PA_ERR_IO;
            }

            if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                if ((r = unsuspend(u)) < 0)
                    return r;
            }

            break;
        }

        case PA_SINK_UNLINKED:
        case PA_SINK_INIT:
        case PA_SINK_INVALID_STATE:
            break;
    }

    return 0;
}
static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (!PA_SINK_IS_LINKED(u->sink->state))
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, true);
        pa_sink_get_mute(u->sink, true);
    }

    return 0;
}
static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_sink_update_volume_and_mute(u->sink);

    return 0;
}
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];

    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s",
                 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));

    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
    bool deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);

    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        bool accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
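
        /* With VOLUME_ACCURACY at PA_VOLUME_NORM/100, "accurate enough" means
         * every channel's residual software factor lies within roughly 1% of
         * unity; in that case the software volume stage is skipped
         * entirely. */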
        pa_log_debug("Requested volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
        pa_log_debug("Got hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */
        s->real_volume = r;
    }
}
static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        bool accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint_verbose(volume_buf[0],
                                                    sizeof(volume_buf[0]),
                                                    &s->thread_info.current_hw_volume,
                                                    &s->channel_map,
                                                    true),
                         pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
        }
    }
}
static int sink_get_mute_cb(pa_sink *s, bool *mute) {
    struct userdata *u = s->userdata;

    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, mute) < 0)
        return -1;

    return 0;
}
static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}
static void mixer_volume_init(struct userdata *u) {
    if (!u->mixer_path->has_volume) {
        pa_sink_set_write_volume_callback(u->sink, NULL);
        pa_sink_set_get_volume_callback(u->sink, NULL);
        pa_sink_set_set_volume_callback(u->sink, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
        pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);

        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_sink_set_write_volume_callback(u->sink, NULL);

        if (u->mixer_path->has_dB) {
            pa_sink_enable_decibel_volume(u->sink, true);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
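
            /* An illustration of the base-volume math: if the mixer path can
             * amplify up to, say, +12 dB (max_dB = 12), base_volume becomes
             * pa_sw_volume_from_dB(-12), i.e. the point on the volume scale
             * where the hardware outputs 0 dB; clients can then treat that
             * point as the nominal "100%". */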
        } else {
            pa_sink_enable_decibel_volume(u->sink, false);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_sink_set_get_mute_callback(u->sink, NULL);
        pa_sink_set_set_mute_callback(u->sink, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
        pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
static int sink_set_port_ucm_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;

    pa_assert(u->ucm_context);

    return pa_alsa_ucm_set_port(u->ucm_context, p, true);
}
static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);
    pa_assert_se(u->mixer_path = data->path);
    mixer_volume_init(u);

    if (s->flags & PA_SINK_DEFERRED_VOLUME)
        pa_asyncmsgq_send(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_SYNC_MIXER, p, 0, NULL);
    else
        sync_mixer(u, p);

    if (data->suspend_when_unavailable && p->available == PA_AVAILABLE_NO)
        pa_sink_suspend(s, true, PA_SUSPEND_UNAVAILABLE);
    else
        pa_sink_suspend(s, false, PA_SUSPEND_UNAVAILABLE);

    return 0;
}
static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u->use_tsched); /* only when timer scheduling is used
                               * can we dynamically adjust the
                               * latency */

    if (!u->pcm_handle)
        return;

    update_sw_params(u, true);
}
static pa_idxset* sink_get_formats(pa_sink *s) {
    struct userdata *u = s->userdata;

    return pa_idxset_copy(u->formats, (pa_copy_func_t) pa_format_info_copy);
}
static bool sink_set_formats(pa_sink *s, pa_idxset *formats) {
    struct userdata *u = s->userdata;
    pa_format_info *f, *g;
    uint32_t idx, n;

    /* FIXME: also validate sample rates against what the device supports */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
            /* EAC3 cannot be sent over S/PDIF */
            return false;
    }

    pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
    u->formats = pa_idxset_new(NULL, NULL);

    /* Note: the logic below won't apply if we're using software encoding.
     * This is fine for now since we don't support that via the passthrough
     * framework, but this must be changed if we do. */

    /* Count how many sample rates we support */
    for (idx = 0, n = 0; u->rates[idx]; idx++)
        n++;

    /* First insert non-PCM formats since we prefer those. */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (!pa_format_info_is_pcm(f)) {
            g = pa_format_info_copy(f);
            pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
            pa_idxset_put(u->formats, g, NULL);
        }
    }

    /* Now add any PCM formats */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (pa_format_info_is_pcm(f)) {
            /* We don't set rates here since we'll just tack on a resampler for
             * unsupported rates */
            pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
        }
    }

    return true;
}
static int sink_reconfigure_cb(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
    struct userdata *u = s->userdata;
    int i;
    bool supported = false;

    /* FIXME: we only update rate for now */

    for (i = 0; u->rates[i]; i++) {
        if (u->rates[i] == spec->rate) {
            supported = true;
            break;
        }
    }

    if (!supported) {
        pa_log_info("Sink does not support sample rate of %d Hz", spec->rate);
        return -1;
    }

    if (!PA_SINK_IS_OPENED(s->state)) {
        pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, spec->rate);
        u->sink->sample_spec.rate = spec->rate;
        return 0;
    }

    /* Passthrough status change is handled during unsuspend */

    return 0;
}
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;

    if (!PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
        pa_sink_process_rewind(u->sink, 0);
        return 0;
    }

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        if (try_recover(u, "snd_pcm_avail", (int) unused) < 0) {
            pa_log_warn("Trying to recover from underrun failed during rewind");
            return -1;
        }
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure the rewind doesn't go too far; it can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;
    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            out_frames = 0;
        }
        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried to rewind, but it was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = true;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    pa_sink_process_rewind(u->sink, 0);
    return 0;
}
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0, real_sleep;

        pa_log_debug("Loop");

        if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) {
            if (process_rewind(u) < 0)
                goto fail;
        }

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), true);
                    u->first = false;
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely. */

                    if (pa_log_ratelimit(PA_LOG_DEBUG))
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */

                pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }

            u->after_rewind = false;
        }
        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_sink_volume_change_apply(u->sink, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0) {
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
            real_sleep = pa_rtclock_now();
        } else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
            goto fail;

        if (rtpoll_sleep > 0) {
            real_sleep = pa_rtclock_now() - real_sleep;

            pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
                         (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
                         (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);

            if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
                pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
                            (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
                            (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
        }

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
            pa_sink_volume_change_apply(u->sink, NULL);
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");
        }
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
    const char *n;
    char *t;

    pa_assert(device_name);

    if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
        pa_sink_new_data_set_name(data, n);
        data->namereg_fail = true;
        return;
    }

    if ((n = pa_modargs_get_value(ma, "name", NULL)))
        data->namereg_fail = true;
    else {
        n = device_id ? device_id : device_name;
        data->namereg_fail = false;
    }

    if (mapping)
        t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
    else
        t = pa_sprintf_malloc("alsa_output.%s", n);
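
    /* This typically produces names of the shape
     * "alsa_output.<device token>.<mapping name>", e.g. something like
     * "alsa_output.pci-0000_00_1b.0.analog-stereo" (the device token here is
     * just an illustration; it depends on the device_id/device value used
     * above). */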
    pa_sink_new_data_set_name(data, t);
    pa_xfree(t);
}
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, NULL, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else if (!(u->mixer_path_set = mapping->output_path_set))
        goto fail;

    return;

fail:
    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
static int setup_mixer(struct userdata *u, bool ignore_dB) {
    bool need_mixer_callback = false;

    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->sink->muted);
    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, so let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->sink->muted);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = true;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL, *key, *mod_name;
    pa_sample_spec ss;
    char *thread_name = NULL;
    uint32_t alternate_sample_rate;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    bool use_mmap = true, b, use_tsched = true, d, ignore_dB = false, namereg_fail = false, deferred_volume = false, set_formats = false, fixed_latency_range = false;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;

    /* Pick sample spec overrides from the mapping, if any */
    if (mapping) {
        if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
            ss.format = mapping->sample_spec.format;
        if (mapping->sample_spec.rate != 0)
            ss.rate = mapping->sample_spec.rate;
        if (mapping->sample_spec.channels != 0) {
            ss.channels = mapping->sample_spec.channels;
            if (pa_channel_map_valid(&mapping->channel_map))
                pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
        }
    }

    /* Override with modargs if provided */
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    alternate_sample_rate = m->core->alternate_sample_rate;
    if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
        pa_log("Failed to parse alternate sample rate");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;
2164 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2165 pa_log("Failed to parse mmap argument.");
2169 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2170 pa_log("Failed to parse tsched argument.");
2174 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2175 pa_log("Failed to parse ignore_dB argument.");
2179 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2180 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2181 pa_log("Failed to parse rewind_safeguard argument");
2185 deferred_volume = m->core->deferred_volume;
2186 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2187 pa_log("Failed to parse deferred_volume argument.");
2191 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2192 pa_log("Failed to parse fixed_latency_range argument.");
2196 use_tsched = pa_alsa_may_tsched(use_tsched);
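    /* pa_alsa_may_tsched() may veto timer-based scheduling when the system
     * cannot support it (for example, when high-resolution timers are not
     * available), regardless of what the module argument requested. */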
    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->deferred_volume = deferred_volume;
    u->fixed_latency_range = fixed_latency_range;
    u->first = true;
    u->rewind_safeguard = rewind_safeguard;
    u->rtpoll = pa_rtpoll_new();

    if (pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll) < 0) {
        pa_log("pa_thread_mq_init() failed.");
        goto fail;
    }

    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            true,               /* monotonic */
            true,               /* smoothing */
            5,                  /* min_history */
            pa_rtclock_now(),   /* time_offset */
            true);              /* start paused */
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
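    /* The smoother turns the jittery snd_pcm_delay() readings into a steady
     * time estimate; it is fed from the IO thread and queried whenever the
     * sink latency is requested. */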
    if (mapping && mapping->ucm_context.ucm)
        u->ucm_context = &mapping->ucm_context;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;
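    /* reserve_init()/reserve_monitor_init() hook up the device reservation
     * scheme (see modules/reserve-wrap.h), so that we cooperate with other
     * audio daemons that may want exclusive access to the device. */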
    b = use_mmap;
    d = use_tsched;

    /* Force ALSA to reread its configuration if module-alsa-card didn't
     * do it for us. This matters if our device was hot-plugged after ALSA
     * has already read its configuration - see
     * https://bugs.freedesktop.org/show_bug.cgi?id=54029
     */

    if (!card)
        snd_config_update_free_global();
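    /* Open the PCM device. There are three ways to get here: with a concrete
     * mapping (typically handed to us by module-alsa-card), with only a
     * device_id (probe a profile set for a usable mapping), or with a plain
     * device string. */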
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
            if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
                pa_log("Failed to enable ucm modifier %s", mod_name);
            else
                pa_log_debug("Enabled ucm modifier %s", mod_name);
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id, &u->device_name, &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id, &u->device_name, &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name, &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, false)))
            goto fail;
    }
    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = false;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = false;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched) {
        pa_log_info("Successfully enabled timer-based scheduling mode.");

        if (u->fixed_latency_range)
            pa_log_info("Disabling latency range changes on underrun");
    }
    /* All passthrough formats supported by PulseAudio require
     * IEC61937 framing with two fake channels. So, passthrough
     * clients will always send two channels. Multichannel sinks
     * cannot accept that, because nobody has implemented sink channel count
     * switching so far. So just don't show known non-working settings
     * to the user. */
    if ((is_iec958(u) || is_hdmi(u)) && ss.channels == 2)
        set_formats = true;
    u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
    if (!u->rates) {
        pa_log_error("Failed to find any supported sample rates.");
        goto fail;
    }

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    if (!u->ucm_context)
        find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield, and taking the address of a bitfield
     * is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse namereg_fail argument.");
        pa_sink_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);
    pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
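    /* The access mode property reflects how samples reach the hardware:
     * "mmap+timer" for mmap with timer-based scheduling, "mmap" for mmap
     * with sound IRQ scheduling, and "serial" for plain read/write. */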
    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);

        /* Copy over all properties the mapping carries */
        while ((key = pa_proplist_iterate(mapping->proplist, &state)))
            pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
    }
    pa_alsa_init_description(data.proplist, card);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->ucm_context)
        pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, true, card);
    else if (u->mixer_path_set)
        pa_alsa_add_ports(&data, u->mixer_path_set, card);
    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
                          (set_formats ? PA_SINK_SET_FORMATS : 0));
    volume_is_set = data.volume_is_set;
    mute_is_set = data.muted_is_set;
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }
2416 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2417 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2418 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2422 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2423 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2424 pa_log("Failed to parse deferred_volume_extra_delay parameter");
    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state_in_main_thread = sink_set_state_in_main_thread_cb;
    u->sink->set_state_in_io_thread = sink_set_state_in_io_thread_cb;
    if (u->ucm_context)
        u->sink->set_port = sink_set_port_ucm_cb;
    else
        u->sink->set_port = sink_set_port_cb;
    if (u->sink->alternate_sample_rate)
        u->sink->reconfigure = sink_reconfigure_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);
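    /* From here on the sink is wired to the IO thread: control messages
     * arrive through the asyncmsgq, and the rtpoll is what the thread's
     * event loop runs on. */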
    u->frame_size = frame_size;
    u->frames_per_block = pa_mempool_block_size_max(m->core->mempool) / frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind for device %s", u->device_name);
        pa_sink_set_max_rewind(u->sink, 0);
    }
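    /* With timer-based scheduling the watermark is adaptive: it starts at
     * the configured value and is raised or lowered at runtime following
     * the TSCHED_WATERMARK_* policy defined at the top of this file.
     * Without tsched, the latency is simply fixed to the hardware buffer
     * size. */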
    if (u->use_tsched) {
        u->tsched_watermark_ref = tsched_watermark;
        reset_watermark(u, u->tsched_watermark_ref, &ss, false);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u, false) < 0)
        goto fail;

    if (u->ucm_context) {
        if (u->sink->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->sink->active_port, true) < 0)
            goto fail;
    } else if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
    thread_name = pa_sprintf_malloc("alsa-sink-%s", pa_strnull(pa_proplist_gets(u->sink->proplist, "alsa.id")));
    if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }
    pa_xfree(thread_name);
    thread_name = NULL;
    /* Get initial mixer settings */
    if (volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (mute_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute) {
            bool mute;

            if (u->sink->get_mute(u->sink, &mute) >= 0)
                pa_sink_set_mute(u->sink, mute, false);
        }
    }

    if ((volume_is_set || mute_is_set) && u->sink->write_volume)
        u->sink->write_volume(u->sink);
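    /* With deferred volume enabled, set_volume()/set_mute() only update the
     * internal state; write_volume() is what actually pushes the pending
     * values to the hardware mixer. */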
    if (set_formats) {
        /* For S/PDIF and HDMI, allow getting/setting custom formats */
        pa_format_info *format;

        /* To start with, we only support PCM formats. Other formats may be
         * added later with pa_sink_set_formats(). */
        format = pa_format_info_new();
        format->encoding = PA_ENCODING_PCM;
        u->formats = pa_idxset_new(NULL, NULL);
        pa_idxset_put(u->formats, format, NULL);

        u->sink->get_formats = sink_get_formats;
        u->sink->set_formats = sink_set_formats;
    }
    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);
    /* Suspend if necessary. FIXME: It would be better to start suspended, but
     * that would require some core changes. It's possible to set
     * pa_sink_new_data.suspend_cause, but that has to be done before the
     * pa_sink_new() call, and we know whether we need to suspend only after
     * the pa_sink_new() call, once the initial port has been chosen. Calling
     * pa_sink_suspend() between pa_sink_new() and pa_sink_put() would
     * otherwise work, but currently pa_sink_suspend() will crash if
     * pa_sink_put() hasn't been called. */
    if (u->sink->active_port) {
        pa_alsa_port_data *port_data;

        port_data = PA_DEVICE_PORT_DATA(u->sink->active_port);

        if (port_data->suspend_when_unavailable && u->sink->active_port->available == PA_AVAILABLE_NO)
            pa_sink_suspend(u->sink, true, PA_SUSPEND_UNAVAILABLE);
    }

    return u->sink;

fail:

    pa_xfree(thread_name);

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
static void userdata_free(struct userdata *u) {
    pa_assert(u);
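    /* Tear-down order matters here: take the sink out of service first,
     * then shut down the IO thread, and only then release the resources
     * the thread may still have been using. */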
    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);
    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_path && !u->mixer_path_set)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    if (u->formats)
        pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);

    if (u->rates)
        pa_xfree(u->rates);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u->paths_dir);
    pa_xfree(u);
}
void pa_alsa_sink_free(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}