2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
28 #include <asoundlib.h>
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
34 #include <pulse/rtclock.h>
35 #include <pulse/timeval.h>
36 #include <pulse/volume.h>
37 #include <pulse/xmalloc.h>
38 #include <pulse/internal.h>
40 #include <pulsecore/core.h>
41 #include <pulsecore/i18n.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/thread-mq.h>
53 #include <pulsecore/rtpoll.h>
54 #include <pulsecore/time-smoother.h>
56 #include <modules/reserve-wrap.h>
58 #include "alsa-util.h"
59 #include "alsa-sink.h"
61 /* #define DEBUG_TIMING */
63 #define DEFAULT_DEVICE "default"
65 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
66 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
69 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
70 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
71 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever drops below this threshold, increase the watermark */
72 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
74 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
75 * will increase the watermark only if we hit a real underrun. */
77 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
78 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty */
80 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother windows size */
81 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
83 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
84 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
86 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
88 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
89 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
91 #define DEFAULT_WRITE_ITERATION_THRESHOLD 0.03 /* don't iterate write if < 3% of the buffer is available */
99 pa_thread_mq thread_mq;
102 snd_pcm_t *pcm_handle;
105 pa_alsa_fdlist *mixer_fdl;
106 pa_alsa_mixer_pdata *mixer_pd;
107 snd_mixer_t *mixer_handle;
108 pa_alsa_path_set *mixer_path_set;
109 pa_alsa_path *mixer_path;
111 pa_cvolume hardware_volume;
120 tsched_watermark_ref,
126 watermark_inc_threshold,
127 watermark_dec_threshold,
130 snd_pcm_uframes_t frames_per_block;
132 pa_usec_t watermark_dec_not_before;
133 pa_usec_t min_latency_ref;
134 pa_usec_t tsched_watermark_usec;
136 pa_memchunk memchunk;
138 char *device_name; /* name of the PCM device */
139 char *control_device; /* name of the control device */
141 bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
143 bool first, after_rewind;
145 pa_rtpoll_item *alsa_rtpoll_item;
147 pa_smoother *smoother;
148 uint64_t write_count;
149 uint64_t since_start;
150 pa_usec_t smoother_interval;
151 pa_usec_t last_smoother_update;
155 pa_reserve_wrapper *reserve;
156 pa_hook_slot *reserve_slot;
157 pa_reserve_monitor_wrapper *monitor;
158 pa_hook_slot *monitor_slot;
161 pa_alsa_ucm_mapping_context *ucm_context;
164 static void userdata_free(struct userdata *u);
166 /* FIXME: Is there a better way to do this than device names? */
167 static bool is_iec958(struct userdata *u) {
168 return (strncmp("iec958", u->device_name, 6) == 0);
171 static bool is_hdmi(struct userdata *u) {
172 return (strncmp("hdmi", u->device_name, 4) == 0);
/* Reserve-wrapper hook: fires when another application asks us to release
 * (or forcibly takes) the audio device; we react by suspending the sink.
 * NOTE(review): interior lines are elided in this chunk -- the handling of
 * the 'forced' argument and the success return path are not visible here. */
175 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
179 pa_log_debug("Suspending sink %s, because another application requested us to release the device.", u->sink->name);
181 if (pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION) < 0)
/* Suspend failed: cancel the hook chain so the reservation is not given up. */
182 return PA_HOOK_CANCEL;
187 static void reserve_done(struct userdata *u) {
190 if (u->reserve_slot) {
191 pa_hook_slot_free(u->reserve_slot);
192 u->reserve_slot = NULL;
196 pa_reserve_wrapper_unref(u->reserve);
201 static void reserve_update(struct userdata *u) {
202 const char *description;
205 if (!u->sink || !u->reserve)
208 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
209 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
/* Acquire a device reservation for 'dname' and hook up reserve_cb().
 * NOTE(review): lines are elided here -- the early-return paths after the
 * system-mode and name-lookup checks, and the error handling after
 * pa_reserve_wrapper_get(), are not visible in this chunk. */
212 static int reserve_init(struct userdata *u, const char *dname) {
221 if (pa_in_system_mode())
224 if (!(rname = pa_alsa_get_reserve_name(dname)))
227 /* We are resuming, try to lock the device */
228 u->reserve = pa_reserve_wrapper_get(u->core, rname);
236 pa_assert(!u->reserve_slot);
237 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
/* Reserve-monitor hook: suspend the sink while another application holds
 * the device, and resume once it is free again. We only auto-suspend when
 * we do not hold a reservation ourselves (!u->reserve). */
242 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
246 if (PA_PTR_TO_UINT(busy) && !u->reserve) {
247 pa_log_debug("Suspending sink %s, because another application is blocking the access to the device.", u->sink->name);
248 pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION);
/* Device became free again -- undo our application-triggered suspend. */
250 pa_log_debug("Resuming sink %s, because other applications aren't blocking access to the device any more.", u->sink->name);
251 pa_sink_suspend(u->sink, false, PA_SUSPEND_APPLICATION);
257 static void monitor_done(struct userdata *u) {
260 if (u->monitor_slot) {
261 pa_hook_slot_free(u->monitor_slot);
262 u->monitor_slot = NULL;
266 pa_reserve_monitor_wrapper_unref(u->monitor);
/* Set up a monitor on the device reservation for 'dname' and hook up
 * monitor_cb(). NOTE(review): elided lines hide the early-return paths
 * after the system-mode / name-lookup checks and the error handling after
 * pa_reserve_monitor_wrapper_get(). */
271 static int reserve_monitor_init(struct userdata *u, const char *dname) {
277 if (pa_in_system_mode())
280 if (!(rname = pa_alsa_get_reserve_name(dname)))
283 /* We are resuming, try to lock the device */
284 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
290 pa_assert(!u->monitor_slot);
291 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
296 static void fix_min_sleep_wakeup(struct userdata *u) {
297 size_t max_use, max_use_2;
300 pa_assert(u->use_tsched);
302 max_use = u->hwbuf_size - u->hwbuf_unused;
303 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
305 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
306 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
308 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
309 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
312 static void fix_tsched_watermark(struct userdata *u) {
315 pa_assert(u->use_tsched);
317 max_use = u->hwbuf_size - u->hwbuf_unused;
319 if (u->tsched_watermark > max_use - u->min_sleep)
320 u->tsched_watermark = max_use - u->min_sleep;
322 if (u->tsched_watermark < u->min_wakeup)
323 u->tsched_watermark = u->min_wakeup;
325 u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
328 static void increase_watermark(struct userdata *u) {
329 size_t old_watermark;
330 pa_usec_t old_min_latency, new_min_latency;
333 pa_assert(u->use_tsched);
335 /* First, just try to increase the watermark */
336 old_watermark = u->tsched_watermark;
337 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
338 fix_tsched_watermark(u);
340 if (old_watermark != u->tsched_watermark) {
341 pa_log_info("Increasing wakeup watermark to %0.2f ms",
342 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
346 /* Hmm, we cannot increase the watermark any further, hence let's
347 raise the latency, unless doing so was disabled in
349 if (u->fixed_latency_range)
352 old_min_latency = u->sink->thread_info.min_latency;
353 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
354 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
356 if (old_min_latency != new_min_latency) {
357 pa_log_info("Increasing minimal latency to %0.2f ms",
358 (double) new_min_latency / PA_USEC_PER_MSEC);
360 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
363 /* When we reach this we're officially fucked! */
366 static void decrease_watermark(struct userdata *u) {
367 size_t old_watermark;
371 pa_assert(u->use_tsched);
373 now = pa_rtclock_now();
375 if (u->watermark_dec_not_before <= 0)
378 if (u->watermark_dec_not_before > now)
381 old_watermark = u->tsched_watermark;
383 if (u->tsched_watermark < u->watermark_dec_step)
384 u->tsched_watermark = u->tsched_watermark / 2;
386 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
388 fix_tsched_watermark(u);
390 if (old_watermark != u->tsched_watermark)
391 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
392 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
394 /* We don't change the latency range*/
397 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
400 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
403 pa_assert(sleep_usec);
404 pa_assert(process_usec);
407 pa_assert(u->use_tsched);
409 usec = pa_sink_get_requested_latency_within_thread(u->sink);
411 if (usec == (pa_usec_t) -1)
412 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
414 wm = u->tsched_watermark_usec;
419 *sleep_usec = usec - wm;
423 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
424 (unsigned long) (usec / PA_USEC_PER_MSEC),
425 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
426 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
430 static int try_recover(struct userdata *u, const char *call, int err) {
435 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
437 pa_assert(err != -EAGAIN);
440 pa_log_debug("%s: Buffer underrun!", call);
442 if (err == -ESTRPIPE)
443 pa_log_debug("%s: System suspended!", call);
445 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
446 pa_log("%s: %s", call, pa_alsa_strerror(err));
/* Compute how much audio (in bytes) is still queued in the hardware buffer
 * given 'n_bytes' reported available, detect underruns, and adapt the
 * tsched watermark up or down accordingly. Returns left_to_play.
 * NOTE(review): several branches are elided in this chunk (the underrun
 * branch body, DEBUG_TIMING guards, the final return). */
455 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, bool on_timeout) {
457 bool underrun = false;
459 /* We use <= instead of < for this check here because an underrun
460 * only happens after the last sample was processed, not already when
461 * it is removed from the buffer. This is particularly important
462 * when block transfer is used. */
464 if (n_bytes <= u->hwbuf_size)
465 left_to_play = u->hwbuf_size - n_bytes;
468 /* We got a dropout. What a mess! */
476 if (!u->first && !u->after_rewind)
477 if (pa_log_ratelimit(PA_LOG_INFO))
478 pa_log_info("Underrun!");
482 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
483 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
484 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
485 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
489 bool reset_not_before = true;
/* Only adapt the watermark for steady-state playback, not right after
 * start or a rewind, when the fill level is not meaningful. */
491 if (!u->first && !u->after_rewind) {
492 if (underrun || left_to_play < u->watermark_inc_threshold)
493 increase_watermark(u);
494 else if (left_to_play > u->watermark_dec_threshold) {
495 reset_not_before = false;
497 /* We decrease the watermark only if have actually
498 * been woken up by a timeout. If something else woke
499 * us up it's too easy to fulfill the deadlines... */
502 decrease_watermark(u);
506 if (reset_not_before)
507 u->watermark_dec_not_before = 0;
/* Fill the hardware buffer using ALSA's zero-copy mmap transfer: query the
 * available space, render sink data directly into the mapped DMA area, and
 * commit it. On return *sleep_usec holds how long the caller may sleep.
 * Returns 1 if any data was written, 0 otherwise (negative on elided error
 * paths). NOTE(review): loop structure, 'polled' handling and several error
 * branches are elided in this chunk. */
513 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
514 bool work_done = false;
515 pa_usec_t max_sleep_usec = 0, process_usec = 0;
516 size_t left_to_play, input_underrun;
520 pa_sink_assert_ref(u->sink);
523 hw_sleep_time(u, &max_sleep_usec, &process_usec);
529 bool after_avail = true;
531 /* First we determine how many samples are missing to fill the
532 * buffer up to 100% */
534 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
536 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
542 n_bytes = (size_t) n * u->frame_size;
545 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
548 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
553 /* We won't fill up the playback buffer before at least
554 * half the sleep time is over because otherwise we might
555 * ask for more data from the clients then they expect. We
556 * need to guarantee that clients only have to keep around
557 * a single hw buffer length. */
560 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
562 pa_log_debug("Not filling up, because too early.");
/* POLLOUT woke us but there is nothing (useful) to write -- likely an
 * ALSA driver bug; warn the user loudly. */
567 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
571 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
572 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write.\n"
573 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
574 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
580 pa_log_debug("Not filling up, because not necessary.");
589 pa_log_debug("Not filling up, because already too many iterations.");
593 } else if (j >= 2 && (n_bytes < (DEFAULT_WRITE_ITERATION_THRESHOLD * (u->hwbuf_size - u->hwbuf_unused)))) {
595 pa_log_debug("Not filling up, because <%g%% available.", DEFAULT_WRITE_ITERATION_THRESHOLD * 100);
600 n_bytes -= u->hwbuf_unused;
604 pa_log_debug("Filling up");
611 const snd_pcm_channel_area_t *areas;
612 snd_pcm_uframes_t offset, frames;
613 snd_pcm_sframes_t sframes;
616 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
617 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
619 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
621 if (!after_avail && err == -EAGAIN)
624 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
630 /* Make sure that if these memblocks need to be copied they will fit into one slot */
631 frames = PA_MIN(frames, u->frames_per_block);
633 if (!after_avail && frames == 0)
636 pa_assert(frames > 0);
639 /* Check these are multiples of 8 bit */
640 pa_assert((areas[0].first & 7) == 0);
641 pa_assert((areas[0].step & 7) == 0);
643 /* We assume a single interleaved memory buffer */
644 pa_assert((areas[0].first >> 3) == 0);
645 pa_assert((areas[0].step >> 3) == u->frame_size);
647 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
/* Wrap the mapped DMA region in a fixed memblock so the sink can render
 * straight into it without an intermediate copy. */
649 written = frames * u->frame_size;
650 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, written, true);
651 chunk.length = pa_memblock_get_length(chunk.memblock);
654 pa_sink_render_into_full(u->sink, &chunk);
655 pa_memblock_unref_fixed(chunk.memblock);
657 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
659 if ((int) sframes == -EAGAIN)
662 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
670 u->write_count += written;
671 u->since_start += written;
674 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) written, (unsigned long) n_bytes);
677 if (written >= n_bytes)
684 input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);
687 pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);
/* Sleep until roughly one watermark's worth of data is left queued. */
689 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
690 process_usec = u->tsched_watermark_usec;
692 if (*sleep_usec > process_usec)
693 *sleep_usec -= process_usec;
697 *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
701 return work_done ? 1 : 0;
/* Fill the hardware buffer using plain snd_pcm_writei() (non-mmap path):
 * render into an intermediate memchunk and copy it to the device. Mirrors
 * mmap_write() in structure; on return *sleep_usec holds how long the
 * caller may sleep. NOTE(review): loop structure, 'polled' handling and
 * several branches are elided in this chunk. */
704 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
705 bool work_done = false;
706 pa_usec_t max_sleep_usec = 0, process_usec = 0;
707 size_t left_to_play, input_underrun;
711 pa_sink_assert_ref(u->sink);
714 hw_sleep_time(u, &max_sleep_usec, &process_usec);
720 bool after_avail = true;
722 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
724 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
730 n_bytes = (size_t) n * u->frame_size;
733 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
736 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
741 /* We won't fill up the playback buffer before at least
742 * half the sleep time is over because otherwise we might
743 * ask for more data from the clients then they expect. We
744 * need to guarantee that clients only have to keep around
745 * a single hw buffer length. */
748 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
/* Spurious POLLOUT wakeup with nothing to write -- likely a driver bug. */
751 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
755 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
756 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
757 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
758 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
770 pa_log_debug("Not filling up, because already too many iterations.");
774 } else if (j >= 2 && (n_bytes < (DEFAULT_WRITE_ITERATION_THRESHOLD * (u->hwbuf_size - u->hwbuf_unused)))) {
776 pa_log_debug("Not filling up, because <%g%% available.", DEFAULT_WRITE_ITERATION_THRESHOLD * 100);
781 n_bytes -= u->hwbuf_unused;
785 snd_pcm_sframes_t frames;
789 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
/* Lazily (re)fill our staging memchunk from the sink's render pipeline. */
791 if (u->memchunk.length <= 0)
792 pa_sink_render(u->sink, n_bytes, &u->memchunk);
794 pa_assert(u->memchunk.length > 0);
796 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
798 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
799 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
801 p = pa_memblock_acquire(u->memchunk.memblock);
802 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
803 pa_memblock_release(u->memchunk.memblock);
805 if (PA_UNLIKELY(frames < 0)) {
807 if (!after_avail && (int) frames == -EAGAIN)
810 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
816 if (!after_avail && frames == 0)
819 pa_assert(frames > 0);
/* Consume the written part of the memchunk; drop it once exhausted. */
822 written = frames * u->frame_size;
823 u->memchunk.index += written;
824 u->memchunk.length -= written;
826 if (u->memchunk.length <= 0) {
827 pa_memblock_unref(u->memchunk.memblock);
828 pa_memchunk_reset(&u->memchunk);
833 u->write_count += written;
834 u->since_start += written;
836 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
838 if (written >= n_bytes)
845 input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);
848 pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);
850 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
851 process_usec = u->tsched_watermark_usec;
853 if (*sleep_usec > process_usec)
854 *sleep_usec -= process_usec;
858 *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
862 return work_done ? 1 : 0;
/* Feed a fresh (timestamp, playback position) sample into the time
 * smoother used for latency interpolation, rate-limited by
 * u->smoother_interval which backs off exponentially. */
865 static void update_smoother(struct userdata *u) {
866 snd_pcm_sframes_t delay = 0;
869 pa_usec_t now1 = 0, now2;
870 snd_pcm_status_t *status;
871 snd_htimestamp_t htstamp = { 0, 0 };
873 snd_pcm_status_alloca(&status);
876 pa_assert(u->pcm_handle);
878 /* Let's update the time smoother */
880 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->sink->sample_spec, false)) < 0)) {
881 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
885 snd_pcm_status_get_htstamp(status, &htstamp);
886 now1 = pa_timespec_load(&htstamp);
888 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
890 now1 = pa_rtclock_now();
892 /* check if the time since the last update is bigger than the interval */
893 if (u->last_smoother_update > 0)
894 if (u->last_smoother_update + u->smoother_interval > now1)
/* Position actually played = bytes written minus what is still queued. */
897 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
899 if (PA_UNLIKELY(position < 0))
902 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
904 pa_smoother_put(u->smoother, now1, now2);
906 u->last_smoother_update = now1;
907 /* exponentially increase the update interval up to the MAX limit */
908 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
911 static int64_t sink_get_latency(struct userdata *u) {
913 pa_usec_t now1, now2;
917 now1 = pa_rtclock_now();
918 now2 = pa_smoother_get(u->smoother, now1);
920 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
922 if (u->memchunk.memblock)
923 delay += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
928 static int build_pollfd(struct userdata *u) {
930 pa_assert(u->pcm_handle);
932 if (u->alsa_rtpoll_item)
933 pa_rtpoll_item_free(u->alsa_rtpoll_item);
935 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
941 /* Called from IO context */
942 static int suspend(struct userdata *u) {
944 pa_assert(u->pcm_handle);
946 pa_smoother_pause(u->smoother, pa_rtclock_now());
948 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
949 * take awfully long with our long buffer sizes today. */
950 snd_pcm_close(u->pcm_handle);
951 u->pcm_handle = NULL;
953 if (u->alsa_rtpoll_item) {
954 pa_rtpoll_item_free(u->alsa_rtpoll_item);
955 u->alsa_rtpoll_item = NULL;
958 /* We reset max_rewind/max_request here to make sure that while we
959 * are suspended the old max_request/max_rewind values set before
960 * the suspend can influence the per-stream buffer of newly
961 * created streams, without their requirements having any
962 * influence on them. */
963 pa_sink_set_max_rewind_within_thread(u->sink, 0);
964 pa_sink_set_max_request_within_thread(u->sink, 0);
966 pa_log_info("Device suspended...");
971 /* Called from IO context */
/* Recompute hwbuf_unused and avail_min from the currently requested sink
 * latency and push the resulting software parameters down to ALSA; also
 * refresh max_request/max_rewind. Returns 0 on success, negative on the
 * (elided) error path. */
972 static int update_sw_params(struct userdata *u) {
973 snd_pcm_uframes_t avail_min;
978 /* Use the full buffer if no one asked us for anything specific */
984 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
987 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
989 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
991 /* We need at least one sample in our buffer */
993 if (PA_UNLIKELY(b < u->frame_size))
/* Everything beyond the requested latency is marked unused. */
996 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
999 fix_min_sleep_wakeup(u);
1000 fix_tsched_watermark(u);
1003 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
1005 /* We need at last one frame in the used part of the buffer */
1006 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
1008 if (u->use_tsched) {
1009 pa_usec_t sleep_usec, process_usec;
1011 hw_sleep_time(u, &sleep_usec, &process_usec);
1012 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
1015 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
1017 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
1018 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
1022 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
/* Rewinding is only meaningful on real hardware devices. */
1023 if (pa_alsa_pcm_is_hw(u->pcm_handle))
1024 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
1026 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
1027 pa_sink_set_max_rewind_within_thread(u->sink, 0);
1033 /* Called from IO Context on unsuspend or from main thread when creating sink */
/* Re-derive all tsched watermark values (steps, thresholds, sleep/wakeup
 * minima) from 'ss' and the sink's sample spec, and (re)publish the sink's
 * latency range. Called from the IO context on unsuspend and from the main
 * thread at sink creation; the elided trailing parameter presumably selects
 * between the *_within_thread and plain latency-range calls -- TODO confirm. */
1034 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
1036 u->tsched_watermark = pa_convert_size(tsched_watermark, ss, &u->sink->sample_spec);
1038 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1039 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1041 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1042 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1044 fix_min_sleep_wakeup(u);
1045 fix_tsched_watermark(u);
1048 pa_sink_set_latency_range_within_thread(u->sink,
1050 pa_bytes_to_usec(u->hwbuf_size, ss));
1052 pa_sink_set_latency_range(u->sink,
1054 pa_bytes_to_usec(u->hwbuf_size, ss));
1056 /* work-around assert in pa_sink_set_latency_within_thead,
1057 keep track of min_latency and reuse it when
1058 this routine is called from IO context */
1059 u->min_latency_ref = u->sink->thread_info.min_latency;
1062 pa_log_info("Time scheduling watermark is %0.2fms",
1063 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
1066 /* Called from IO context */
/* Reopen and reconfigure the PCM device after a suspend, verifying that the
 * exact previous access mode, sample spec and buffer geometry could be
 * restored; resets the smoother and watermark state on success. Returns 0
 * on success; the (elided) 'fail' label path closes the handle and returns
 * an error. */
1067 static int unsuspend(struct userdata *u) {
1071 snd_pcm_uframes_t period_size, buffer_size;
1072 char *device_name = NULL;
1075 pa_assert(!u->pcm_handle);
1077 pa_log_info("Trying resume...");
1079 if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
1080 /* Need to open device in NONAUDIO mode */
1081 int len = strlen(u->device_name) + 8;
1083 device_name = pa_xmalloc(len);
1084 pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
1087 if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
1089 SND_PCM_NO_AUTO_RESAMPLE|
1090 SND_PCM_NO_AUTO_CHANNELS|
1091 SND_PCM_NO_AUTO_FORMAT)) < 0) {
1092 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
1096 ss = u->sink->sample_spec;
1097 period_size = u->fragment_size / u->frame_size;
1098 buffer_size = u->hwbuf_size / u->frame_size;
1102 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, true)) < 0) {
1103 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
/* Resume must reproduce the exact pre-suspend configuration; any
 * mismatch in access mode, sample spec or buffer geometry aborts. */
1107 if (b != u->use_mmap || d != u->use_tsched) {
1108 pa_log_warn("Resume failed, couldn't get original access mode.");
1112 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1113 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1117 if (period_size*u->frame_size != u->fragment_size ||
1118 buffer_size*u->frame_size != u->hwbuf_size) {
1119 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1120 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1121 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1125 if (update_sw_params(u) < 0)
1128 if (build_pollfd(u) < 0)
1132 pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
1133 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1134 u->last_smoother_update = 0;
1139 /* reset the watermark to the value defined when sink was created */
1141 reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, true);
1143 pa_log_info("Resumed successfully...");
1145 pa_xfree(device_name);
/* Error path: tear down the partially opened handle. */
1149 if (u->pcm_handle) {
1150 snd_pcm_close(u->pcm_handle);
1151 u->pcm_handle = NULL;
1154 pa_xfree(device_name);
1159 /* Called from IO context */
/* IO-thread message handler for the sink: answers latency queries and
 * performs suspend/resume state transitions; everything else is delegated
 * to the generic pa_sink_process_msg(). */
1160 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1161 struct userdata *u = PA_SINK(o)->userdata;
1165 case PA_SINK_MESSAGE_GET_LATENCY: {
1169 r = sink_get_latency(u);
1171 *((int64_t*) data) = r;
1176 case PA_SINK_MESSAGE_SET_STATE:
1178 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1180 case PA_SINK_SUSPENDED: {
1183 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1185 if ((r = suspend(u)) < 0)
1192 case PA_SINK_RUNNING: {
/* First start: only the pollfd needs to be built. */
1195 if (u->sink->thread_info.state == PA_SINK_INIT) {
1196 if (build_pollfd(u) < 0)
1200 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1201 if ((r = unsuspend(u)) < 0)
1208 case PA_SINK_UNLINKED:
1210 case PA_SINK_INVALID_STATE:
1217 return pa_sink_process_msg(o, code, data, offset, chunk);
1220 /* Called from main context */
/* Main-thread state-change callback: release the device reservation when
 * suspending, and re-acquire it when coming back up (failing the resume
 * with PA_ERR_BUSY if the device cannot be re-locked). */
1221 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1222 pa_sink_state_t old_state;
1225 pa_sink_assert_ref(s);
1226 pa_assert_se(u = s->userdata);
1228 old_state = pa_sink_get_state(u->sink);
1230 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1232 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1233 if (reserve_init(u, u->device_name) < 0)
1234 return -PA_ERR_BUSY;
/* Mixer-element callback (main thread / ctl variant): re-reads hardware
 * volume and mute when the element's value changed. While the session is
 * suspended we only mark the mixer dirty for later re-sync. */
1239 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1240 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1243 pa_assert(u->mixer_handle);
1245 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1248 if (!PA_SINK_IS_LINKED(u->sink->state))
1251 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1252 pa_sink_set_mixer_dirty(u->sink, true);
1256 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1257 pa_sink_get_volume(u->sink, true);
1258 pa_sink_get_mute(u->sink, true);
/* Mixer-element callback (IO-thread variant, used with deferred volume):
 * pushes a combined volume+mute refresh on value changes; marks the mixer
 * dirty instead while the session is suspended. */
1264 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1265 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1268 pa_assert(u->mixer_handle);
1270 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1273 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1274 pa_sink_set_mixer_dirty(u->sink, true);
1278 if (mask & SND_CTL_EVENT_MASK_VALUE)
1279 pa_sink_update_volume_and_mute(u->sink);
/* Volume read-back callback: fetch the current hardware volume via the
 * mixer path, rescale it by the base volume, and adopt it as the sink's
 * real volume if it changed (resetting soft volume on dB-capable paths). */
1284 static void sink_get_volume_cb(pa_sink *s) {
1285 struct userdata *u = s->userdata;
1287 char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1290 pa_assert(u->mixer_path);
1291 pa_assert(u->mixer_handle);
1293 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1296 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1297 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1299 pa_log_debug("Read hardware volume: %s",
1300 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));
/* No change since last read: nothing to propagate. */
1302 if (pa_cvolume_equal(&u->hardware_volume, &r))
1305 s->real_volume = u->hardware_volume = r;
1307 /* Hmm, so the hardware volume changed, let's reset our software volume */
1308 if (u->mixer_path->has_dB)
1309 pa_sink_set_soft_volume(s, NULL);
/* pa_sink set_volume callback: write s->real_volume to the hardware mixer
 * as closely as possible, then (on dB-capable paths) compute a residual
 * software volume that makes up for whatever the hardware could not
 * match. With deferred volume the actual hw write happens later from the
 * IO thread (sink_write_volume_cb). */
1312 static void sink_set_volume_cb(pa_sink *s) {
1313 struct userdata *u = s->userdata;
1315 char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1316 bool deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);
1319 pa_assert(u->mixer_path);
1320 pa_assert(u->mixer_handle);
1322 /* Shift up by the base volume */
1323 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
/* With deferred volume we only compute here and write later, hence
 * write_to_hw = !deferred_volume. */
1325 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1328 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1329 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1331 u->hardware_volume = r;
1333 if (u->mixer_path->has_dB) {
1334 pa_cvolume new_soft_volume;
1335 bool accurate_enough;
1337 /* Match exactly what the user requested by software */
1338 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1340 /* If the adjustment to do in software is only minimal we
1341 * can skip it. That saves us CPU at the expense of a bit of
/* accurate_enough: the residual correction lies within
 * +/- VOLUME_ACCURACY of unity on every channel. */
1344 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1345 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1347 pa_log_debug("Requested volume: %s",
1348 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
1349 pa_log_debug("Got hardware volume: %s",
1350 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
1351 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1352 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
1353 pa_yes_no(accurate_enough));
1355 if (!accurate_enough)
1356 s->soft_volume = new_soft_volume;
/* No dB scale: software attenuation on top would be guesswork. */
1359 pa_log_debug("Wrote hardware volume: %s",
1360 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));
1362 /* We can't match exactly what the user requested, hence let's
1363 * at least tell the user about it */
/* pa_sink write_volume callback (deferred volume, runs from the IO
 * thread): push thread_info.current_hw_volume to the hardware mixer and
 * warn if what the hardware accepted deviates noticeably from the
 * request. */
1369 static void sink_write_volume_cb(pa_sink *s) {
1370 struct userdata *u = s->userdata;
1371 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1374 pa_assert(u->mixer_path);
1375 pa_assert(u->mixer_handle);
1376 pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);
1378 /* Shift up by the base volume */
1379 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1381 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
1382 pa_log_error("Writing HW volume failed");
1385 bool accurate_enough;
1387 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1388 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
/* tmp_vol = written / requested; "accurate enough" when that ratio
 * stays within +/- VOLUME_ACCURACY of unity on all channels. */
1390 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1392 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1393 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1395 if (!accurate_enough) {
1396 char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1398 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1399 pa_cvolume_snprint_verbose(volume_buf[0],
1400 sizeof(volume_buf[0]),
1401 &s->thread_info.current_hw_volume,
1404 pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
/* pa_sink get_mute callback: read the mute switch state from the active
 * mixer path into *mute. Returns negative on failure. */
1409 static int sink_get_mute_cb(pa_sink *s, bool *mute) {
1410 struct userdata *u = s->userdata;
1413 pa_assert(u->mixer_path);
1414 pa_assert(u->mixer_handle);
1416 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, mute) < 0)
/* pa_sink set_mute callback: propagate s->muted to the hardware mute
 * switch of the active mixer path. */
1422 static void sink_set_mute_cb(pa_sink *s) {
1423 struct userdata *u = s->userdata;
1426 pa_assert(u->mixer_path);
1427 pa_assert(u->mixer_handle);
1429 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
/* Install (or clear) the sink's volume/mute callbacks according to the
 * capabilities of the currently selected mixer path, and derive base
 * volume and step count. Called at setup time and on port switches. */
1432 static void mixer_volume_init(struct userdata *u) {
/* No hardware volume on this path: clear all volume callbacks so the
 * core falls back to pure software volume. */
1435 if (!u->mixer_path->has_volume) {
1436 pa_sink_set_write_volume_callback(u->sink, NULL);
1437 pa_sink_set_get_volume_callback(u->sink, NULL);
1438 pa_sink_set_set_volume_callback(u->sink, NULL);
1440 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1442 pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
1443 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
/* Deferred volume (IO-thread hw writes) needs a dB scale to be
 * meaningful, hence the has_dB condition. */
1445 if (u->mixer_path->has_dB && u->deferred_volume) {
1446 pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
1447 pa_log_info("Successfully enabled deferred volume.");
1449 pa_sink_set_write_volume_callback(u->sink, NULL);
1451 if (u->mixer_path->has_dB) {
1452 pa_sink_enable_decibel_volume(u->sink, true);
1453 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
/* Base volume maps 0 dB onto PA_VOLUME_NORM; with max_dB above 0
 * this yields a base below NORM. */
1455 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1456 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1458 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
/* No dB info: expose the raw integer range as discrete steps. */
1460 pa_sink_enable_decibel_volume(u->sink, false);
1461 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1463 u->sink->base_volume = PA_VOLUME_NORM;
1464 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1467 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
/* Same pattern for the mute switch. */
1470 if (!u->mixer_path->has_mute) {
1471 pa_sink_set_get_mute_callback(u->sink, NULL);
1472 pa_sink_set_set_mute_callback(u->sink, NULL);
1473 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1475 pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
1476 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1477 pa_log_info("Using hardware mute control.");
/* pa_sink set_port callback for UCM-managed devices: delegate the whole
 * port switch to the UCM context. */
1481 static int sink_set_port_ucm_cb(pa_sink *s, pa_device_port *p) {
1482 struct userdata *u = s->userdata;
1486 pa_assert(u->ucm_context);
1488 return pa_alsa_ucm_set_port(u->ucm_context, p, true);
/* pa_sink set_port callback (non-UCM): activate the mixer path stored in
 * the port's data and re-derive the volume/mute callbacks for it. */
1491 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1492 struct userdata *u = s->userdata;
1493 pa_alsa_port_data *data;
1497 pa_assert(u->mixer_handle);
1499 data = PA_DEVICE_PORT_DATA(p);
/* The port's path becomes the active mixer path. */
1501 pa_assert_se(u->mixer_path = data->path);
1502 pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1504 mixer_volume_init(u);
/* With deferred volume the new hw state must be pushed/read from the
 * IO thread. NOTE(review): the branch bodies are elided here. */
1508 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
1509 if (s->write_volume)
/* pa_sink update_requested_latency callback: recompute the software
 * parameters (watermarks, hwbuf_unused) for the new latency and, if the
 * usable buffer shrank, force a full rewind so stale data beyond the new
 * fill level is dropped. Only meaningful with timer scheduling. */
1519 static void sink_update_requested_latency_cb(pa_sink *s) {
1520 struct userdata *u = s->userdata;
1523 pa_assert(u->use_tsched); /* only when timer scheduling is used
1524 * we can dynamically adjust the
1530 before = u->hwbuf_unused;
1531 update_sw_params(u);
1533 /* Let's check whether we now use only a smaller part of the
1534 buffer then before. If so, we need to make sure that subsequent
1535 rewinds are relative to the new maximum fill level and not to the
1536 current fill level. Thus, let's do a full rewind once, to clear
1539 if (u->hwbuf_unused > before) {
1540 pa_log_debug("Requesting rewind due to latency change.");
/* (size_t) -1 == "rewind as much as possible". */
1541 pa_sink_request_rewind(s, (size_t) -1);
/* pa_sink get_formats callback: hand out a deep copy of the currently
 * supported format list (caller owns the returned idxset). */
1545 static pa_idxset* sink_get_formats(pa_sink *s) {
1546 struct userdata *u = s->userdata;
1550 return pa_idxset_copy(u->formats, (pa_copy_func_t) pa_format_info_copy);
/* pa_sink set_formats callback: replace u->formats with a validated copy
 * of the requested set. Non-PCM (passthrough) formats are inserted first
 * and pinned to the device's supported sample rates; PCM formats follow
 * without rate restrictions (a resampler covers unsupported rates). */
1553 static bool sink_set_formats(pa_sink *s, pa_idxset *formats) {
1554 struct userdata *u = s->userdata;
1555 pa_format_info *f, *g;
1560 /* FIXME: also validate sample rates against what the device supports */
1561 PA_IDXSET_FOREACH(f, formats, idx) {
1562 if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
1563 /* EAC3 cannot be sent over over S/PDIF */
/* Drop the old list and start fresh. */
1567 pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
1568 u->formats = pa_idxset_new(NULL, NULL);
1570 /* Note: the logic below won't apply if we're using software encoding.
1571 * This is fine for now since we don't support that via the passthrough
1572 * framework, but this must be changed if we do. */
1574 /* Count how many sample rates we support */
1575 for (idx = 0, n = 0; u->rates[idx]; idx++)
1578 /* First insert non-PCM formats since we prefer those. */
1579 PA_IDXSET_FOREACH(f, formats, idx) {
1580 if (!pa_format_info_is_pcm(f)) {
1581 g = pa_format_info_copy(f);
1582 pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
1583 pa_idxset_put(u->formats, g, NULL);
1587 /* Now add any PCM formats */
1588 PA_IDXSET_FOREACH(f, formats, idx) {
1589 if (pa_format_info_is_pcm(f)) {
1590 /* We don't set rates here since we'll just tack on a resampler for
1591 * unsupported rates */
1592 pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
/* pa_sink update_rate callback: switch the configured sample rate if the
 * device supports it and the sink is not currently opened. */
1599 static int sink_update_rate_cb(pa_sink *s, uint32_t rate) {
1600 struct userdata *u = s->userdata;
1602 bool supported = false;
/* u->rates is a zero-terminated array of supported rates. */
1606 for (i = 0; u->rates[i]; i++) {
1607 if (u->rates[i] == rate) {
1614 pa_log_info("Sink does not support sample rate of %d Hz", rate);
/* Only safe to change while suspended/not opened. */
1618 if (!PA_SINK_IS_OPENED(s->state)) {
1619 pa_log_info("Updating rate for device %s, new rate is %d",u->device_name, rate);
1620 u->sink->sample_spec.rate = rate;
/* Execute a rewind requested on the sink: compute how far we may safely
 * rewind the ALSA ring buffer (leaving rewind_safeguard bytes untouched
 * for in-flight DMA), perform snd_pcm_rewind(), and tell the core how
 * many bytes were actually rewound via pa_sink_process_rewind(). */
1627 static int process_rewind(struct userdata *u) {
1628 snd_pcm_sframes_t unused;
1629 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
/* Not opened: complete the rewind request with 0 bytes. */
1632 if (!PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1633 pa_sink_process_rewind(u->sink, 0);
1637 /* Figure out how much we shall rewind and reset the counter */
1638 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1640 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1642 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1643 if (try_recover(u, "snd_pcm_avail", (int) unused) < 0) {
1644 pa_log_warn("Trying to recover from underrun failed during rewind");
1649 unused_nbytes = (size_t) unused * u->frame_size;
1651 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1652 unused_nbytes += u->rewind_safeguard;
/* limit = bytes currently queued beyond the safeguard; rewinding more
 * than that would touch data the hardware may already be reading. */
1654 if (u->hwbuf_size > unused_nbytes)
1655 limit_nbytes = u->hwbuf_size - unused_nbytes;
1659 if (rewind_nbytes > limit_nbytes)
1660 rewind_nbytes = limit_nbytes;
1662 if (rewind_nbytes > 0) {
1663 snd_pcm_sframes_t in_frames, out_frames;
1665 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1667 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1668 pa_log_debug("before: %lu", (unsigned long) in_frames);
1669 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1670 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1671 if (try_recover(u, "process_rewind", out_frames) < 0)
1676 pa_log_debug("after: %lu", (unsigned long) out_frames);
/* ALSA may rewind fewer frames than asked for; report the actual
 * amount back to the core. */
1678 rewind_nbytes = (size_t) out_frames * u->frame_size;
1680 if (rewind_nbytes <= 0)
1681 pa_log_info("Tried rewind, but was apparently not possible.");
1683 u->write_count -= rewind_nbytes;
1684 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1685 pa_sink_process_rewind(u->sink, rewind_nbytes);
/* Mark that the next write follows a rewind (affects write path). */
1687 u->after_rewind = true;
1691 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1693 pa_sink_process_rewind(u->sink, 0);
/* The sink's IO thread: loops on the rtpoll, handling rewind requests,
 * rendering audio into the ALSA buffer (mmap or unix write), scheduling
 * the next wakeup (timer-based or POLLOUT-driven), applying deferred
 * volume changes, and recovering from poll/underrun errors. Exits via
 * the shutdown messages at the bottom on fatal failure. */
1697 static void thread_func(void *userdata) {
1698 struct userdata *u = userdata;
1699 unsigned short revents = 0;
1703 pa_log_debug("Thread starting up");
1705 if (u->core->realtime_scheduling)
1706 pa_make_realtime(u->core->realtime_priority);
1708 pa_thread_mq_install(&u->thread_mq);
1712 pa_usec_t rtpoll_sleep = 0, real_sleep;
1715 pa_log_debug("Loop");
/* Process any pending rewind before writing new data. */
1718 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) {
1719 if (process_rewind(u) < 0)
1723 /* Render some data and write it to the dsp */
1724 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1726 pa_usec_t sleep_usec = 0;
1727 bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1730 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1732 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1737 /* pa_log_debug("work_done = %i", work_done); */
/* First data written: kick off playback and resume the clock
 * smoother. NOTE(review): the guarding condition is elided. */
1742 pa_log_info("Starting playback.");
1743 snd_pcm_start(u->pcm_handle);
1745 pa_smoother_resume(u->smoother, pa_rtclock_now(), true);
1753 if (u->use_tsched) {
1756 if (u->since_start <= u->hwbuf_size) {
1758 /* USB devices on ALSA seem to hit a buffer
1759 * underrun during the first iterations much
1760 * quicker then we calculate here, probably due to
1761 * the transport latency. To accommodate for that
1762 * we artificially decrease the sleep time until
1763 * we have filled the buffer at least once
1766 if (pa_log_ratelimit(PA_LOG_DEBUG))
1767 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1771 /* OK, the playback buffer is now full, let's
1772 * calculate when to wake up next */
1774 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
1777 /* Convert from the sound card time domain to the
1778 * system time domain */
1779 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1782 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
1785 /* We don't trust the conversion, so we wake up whatever comes first */
1786 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1789 u->after_rewind = false;
/* Deferred volume changes may want their own, possibly earlier,
 * wakeup. */
1793 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1794 pa_usec_t volume_sleep;
1795 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1796 if (volume_sleep > 0) {
1797 if (rtpoll_sleep > 0)
1798 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1800 rtpoll_sleep = volume_sleep;
1804 if (rtpoll_sleep > 0) {
1805 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
/* Remember when we went to sleep so we can measure the real
 * scheduling delay afterwards. */
1806 real_sleep = pa_rtclock_now();
1809 pa_rtpoll_set_timer_disabled(u->rtpoll);
1811 /* Hmm, nothing to do. Let's sleep */
1812 if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
1815 if (rtpoll_sleep > 0) {
1816 real_sleep = pa_rtclock_now() - real_sleep;
1818 pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
1819 (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
1820 (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
/* Late by more than a watermark: worth telling the user, as this
 * directly degrades achievable latency. */
1822 if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
1823 pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
1824 (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
1825 (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
1828 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
1829 pa_sink_volume_change_apply(u->sink, NULL);
1834 /* Tell ALSA about this and process its response */
1835 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1836 struct pollfd *pollfd;
1840 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1842 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1843 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
/* Anything besides POLLOUT (POLLERR etc.) indicates trouble;
 * try to recover the PCM. */
1847 if (revents & ~POLLOUT) {
1848 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1854 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1855 pa_log_debug("Wakeup from ALSA!");
1862 /* If this was no regular exit from the loop we have to continue
1863 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1864 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1865 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1868 pa_log_debug("Thread shutting down");
/* Pick the sink's registry name: explicit sink_name/name module argument
 * wins (with strict name registration), otherwise synthesize
 * "alsa_output.<device>[.<mapping>]" and allow fallback renaming. */
1871 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1877 pa_assert(device_name);
1879 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1880 pa_sink_new_data_set_name(data, n);
/* User-chosen name: fail hard if it is already taken. */
1881 data->namereg_fail = true;
1885 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1886 data->namereg_fail = true;
1888 n = device_id ? device_id : device_name;
/* Auto-generated name: let the registry uniquify on collision. */
1889 data->namereg_fail = false;
1893 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1895 t = pa_sprintf_malloc("alsa_output.%s", n);
1897 pa_sink_new_data_set_name(data, t);
/* Locate and probe the mixer for our PCM: open a mixer device, then
 * either synthesize a single path for an explicitly requested element or
 * take the mapping's probed output path set. On any failure, tear down
 * what was opened (fail label at the bottom). */
1901 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
/* Nothing to look for without a mapping or an explicit element. */
1902 if (!mapping && !element)
1905 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1906 pa_log_info("Failed to find a working mixer device.");
/* Explicit "control=" element: build a one-element path and probe it. */
1912 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1915 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1918 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1919 pa_alsa_path_dump(u->mixer_path);
1920 } else if (!(u->mixer_path_set = mapping->output_path_set))
/* Error path: release whatever we managed to open. */
1927 if (u->mixer_path) {
1928 pa_alsa_path_free(u->mixer_path);
1929 u->mixer_path = NULL;
1932 if (u->mixer_handle) {
1933 snd_mixer_close(u->mixer_handle);
1934 u->mixer_handle = NULL;
/* Activate the appropriate mixer path (the active port's path, or the
 * only/first available one), initialize volume/mute handling and, when
 * any path offers hw volume or mute, register change callbacks — on the
 * IO thread (rtpoll) for deferred volume, else on the main loop. */
1938 static int setup_mixer(struct userdata *u, bool ignore_dB) {
1939 bool need_mixer_callback = false;
/* No mixer found earlier: nothing to set up, not an error. */
1943 if (!u->mixer_handle)
1946 if (u->sink->active_port) {
1947 pa_alsa_port_data *data;
1949 /* We have a list of supported paths, so let's activate the
1950 * one that has been chosen as active */
1952 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1953 u->mixer_path = data->path;
1955 pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->sink->muted);
/* No active port: fall back to the first path in the set. */
1959 if (!u->mixer_path && u->mixer_path_set)
1960 u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1962 if (u->mixer_path) {
1963 /* Hmm, we have only a single path, then let's activate it */
1965 pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->sink->muted);
1971 mixer_volume_init(u);
1973 /* Will we need to register callbacks? */
1974 if (u->mixer_path_set && u->mixer_path_set->paths) {
1978 PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1979 if (p->has_volume || p->has_mute)
1980 need_mixer_callback = true;
1983 else if (u->mixer_path)
1984 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1986 if (need_mixer_callback) {
1987 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
/* Deferred volume: watch the mixer fds from the IO thread. */
1988 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1989 u->mixer_pd = pa_alsa_mixer_pdata_new();
1990 mixer_callback = io_mixer_callback;
1992 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1993 pa_log("Failed to initialize file descriptor monitoring");
/* Otherwise watch them from the main loop. */
1997 u->mixer_fdl = pa_alsa_fdlist_new();
1998 mixer_callback = ctl_mixer_callback;
2000 if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
2001 pa_log("Failed to initialize file descriptor monitoring");
2006 if (u->mixer_path_set)
2007 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
2009 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
/* Create a new ALSA sink: parse module arguments, open the PCM device
 * (by mapping, by device id, or by device string), set up mixer, ports,
 * buffer/timing parameters and the IO thread, and finally publish the
 * sink with pa_sink_put(). Returns the new sink or NULL on failure (the
 * fail label, elided here, frees everything via userdata_free()). */
2015 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
2017 struct userdata *u = NULL;
2018 const char *dev_id = NULL, *key, *mod_name;
2020 char *thread_name = NULL;
2021 uint32_t alternate_sample_rate;
2023 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
2024 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
2026 bool use_mmap = true, b, use_tsched = true, d, ignore_dB = false, namereg_fail = false, deferred_volume = false, set_formats = false, fixed_latency_range = false;
2027 pa_sink_new_data data;
2030 pa_alsa_profile_set *profile_set = NULL;
/* --- Sample spec / channel map: core defaults, then mapping, then
 * module arguments. --- */
2036 ss = m->core->default_sample_spec;
2037 map = m->core->default_channel_map;
2039 /* Pick sample spec overrides from the mapping, if any */
2041 if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
2042 ss.format = mapping->sample_spec.format;
2043 if (mapping->sample_spec.rate != 0)
2044 ss.rate = mapping->sample_spec.rate;
2045 if (mapping->sample_spec.channels != 0) {
2046 ss.channels = mapping->sample_spec.channels;
2047 if (pa_channel_map_valid(&mapping->channel_map))
2048 pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
2052 /* Override with modargs if provided */
2053 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
2054 pa_log("Failed to parse sample specification and channel map");
2058 alternate_sample_rate = m->core->alternate_sample_rate;
2059 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
2060 pa_log("Failed to parse alternate sample rate");
/* --- Buffer metrics: fragment/period sizes for classic IRQ mode and
 * tsched buffer/watermark for timer scheduling. --- */
2064 frame_size = pa_frame_size(&ss);
2066 nfrags = m->core->default_n_fragments;
2067 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
/* Ensure the fragment holds at least one frame. */
2069 frag_size = (uint32_t) frame_size;
2070 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2071 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
2073 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2074 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2075 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2076 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2077 pa_log("Failed to parse buffer metrics");
2081 buffer_size = nfrags * frag_size;
2083 period_frames = frag_size/frame_size;
2084 buffer_frames = buffer_size/frame_size;
2085 tsched_frames = tsched_size/frame_size;
/* --- Boolean/tunable module arguments. --- */
2087 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2088 pa_log("Failed to parse mmap argument.");
2092 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2093 pa_log("Failed to parse tsched argument.");
2097 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2098 pa_log("Failed to parse ignore_dB argument.");
2102 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2103 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2104 pa_log("Failed to parse rewind_safeguard argument");
2108 deferred_volume = m->core->deferred_volume;
2109 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2110 pa_log("Failed to parse deferred_volume argument.");
2114 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2115 pa_log("Failed to parse fixed_latency_range argument.");
/* May veto tsched globally (e.g. missing high-resolution timers). */
2119 use_tsched = pa_alsa_may_tsched(use_tsched);
/* --- Allocate and pre-fill the userdata. --- */
2121 u = pa_xnew0(struct userdata, 1);
2124 u->use_mmap = use_mmap;
2125 u->use_tsched = use_tsched;
2126 u->deferred_volume = deferred_volume;
2127 u->fixed_latency_range = fixed_latency_range;
2129 u->rewind_safeguard = rewind_safeguard;
2130 u->rtpoll = pa_rtpoll_new();
2132 if (pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll) < 0) {
2133 pa_log("pa_thread_mq_init() failed.");
2137 u->smoother = pa_smoother_new(
2138 SMOOTHER_ADJUST_USEC,
2139 SMOOTHER_WINDOW_USEC,
2145 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2148 if (mapping && mapping->ucm_context.ucm)
2149 u->ucm_context = &mapping->ucm_context;
2151 dev_id = pa_modargs_get_value(
2153 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2155 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
/* --- Device reservation (avoids fights with other audio daemons). --- */
2157 if (reserve_init(u, dev_id) < 0)
2160 if (reserve_monitor_init(u, dev_id) < 0)
2166 /* Force ALSA to reread its configuration if module-alsa-card didn't
2167 * do it for us. This matters if our device was hot-plugged after ALSA
2168 * has already read its configuration - see
2169 * https://bugs.freedesktop.org/show_bug.cgi?id=54029
2173 snd_config_update_free_global();
/* --- Open the PCM: by mapping (device_id required), by device_id with
 * automatic profile probing, or by plain device string. --- */
2177 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2178 pa_log("device_id= not set");
2182 if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
2183 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
2184 pa_log("Failed to enable ucm modifier %s", mod_name);
2186 pa_log_debug("Enabled ucm modifier %s", mod_name);
2189 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2193 SND_PCM_STREAM_PLAYBACK,
2194 &period_frames, &buffer_frames, tsched_frames,
2198 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2200 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2203 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2207 SND_PCM_STREAM_PLAYBACK,
2208 &period_frames, &buffer_frames, tsched_frames,
2209 &b, &d, profile_set, &mapping)))
2214 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2215 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2218 SND_PCM_STREAM_PLAYBACK,
2219 &period_frames, &buffer_frames, tsched_frames,
2224 pa_assert(u->device_name);
2225 pa_log_info("Successfully opened device %s.", u->device_name);
2227 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2228 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
2233 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
/* b/d report whether the device actually supports mmap and tsched;
 * fall back gracefully if not. */
2235 if (use_mmap && !b) {
2236 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2237 u->use_mmap = use_mmap = false;
2240 if (use_tsched && (!b || !d)) {
2241 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2242 u->use_tsched = use_tsched = false;
2246 pa_log_info("Successfully enabled mmap() mode.");
2248 if (u->use_tsched) {
2249 pa_log_info("Successfully enabled timer-based scheduling mode.");
2251 if (u->fixed_latency_range)
2252 pa_log_info("Disabling latency range changes on underrun");
2255 /* All passthrough formats supported by PulseAudio require
2256 * IEC61937 framing with two fake channels. So, passthrough
2257 * clients will always send two channels. Multichannel sinks
2258 * cannot accept that, because nobody implemented sink channel count
2259 * switching so far. So just don't show known non-working settings
2261 if ((is_iec958(u) || is_hdmi(u)) && ss.channels == 2)
2264 u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
2266 pa_log_error("Failed to find any supported sample rates.");
2270 /* ALSA might tweak the sample spec, so recalculate the frame size */
2271 frame_size = pa_frame_size(&ss);
/* UCM manages its own routing/volume; only probe a mixer without it. */
2273 if (!u->ucm_context)
2274 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
/* --- Fill pa_sink_new_data and create the sink object. --- */
2276 pa_sink_new_data_init(&data);
2277 data.driver = driver;
2280 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2282 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2283 * variable instead of using &data.namereg_fail directly, because
2284 * data.namereg_fail is a bitfield and taking the address of a bitfield
2285 * variable is impossible. */
2286 namereg_fail = data.namereg_fail;
2287 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2288 pa_log("Failed to parse namereg_fail argument.");
2289 pa_sink_new_data_done(&data);
2292 data.namereg_fail = namereg_fail;
2294 pa_sink_new_data_set_sample_spec(&data, &ss);
2295 pa_sink_new_data_set_channel_map(&data, &map);
2296 pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
2298 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2299 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2300 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2301 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2302 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2305 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2306 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
/* Copy mapping-provided properties into the sink proplist. */
2308 while ((key = pa_proplist_iterate(mapping->proplist, &state)))
2309 pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
2312 pa_alsa_init_description(data.proplist, card);
2314 if (u->control_device)
2315 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2317 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2318 pa_log("Invalid properties");
2319 pa_sink_new_data_done(&data);
/* Ports come from UCM or from the probed mixer path set. */
2324 pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, true, card);
2325 else if (u->mixer_path_set)
2326 pa_alsa_add_ports(&data, u->mixer_path_set, card);
2328 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2329 (set_formats ? PA_SINK_SET_FORMATS : 0));
/* Remember these before _done() invalidates data. */
2330 volume_is_set = data.volume_is_set;
2331 mute_is_set = data.muted_is_set;
2332 pa_sink_new_data_done(&data);
2335 pa_log("Failed to create sink object");
2339 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2340 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2341 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2345 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2346 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2347 pa_log("Failed to parse deferred_volume_extra_delay parameter");
/* --- Wire up sink callbacks. --- */
2351 u->sink->parent.process_msg = sink_process_msg;
2353 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2354 u->sink->set_state = sink_set_state_cb;
2356 u->sink->set_port = sink_set_port_ucm_cb;
2358 u->sink->set_port = sink_set_port_cb;
2359 if (u->sink->alternate_sample_rate)
2360 u->sink->update_rate = sink_update_rate_cb;
2361 u->sink->userdata = u;
2363 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2364 pa_sink_set_rtpoll(u->sink, u->rtpoll);
/* --- Final buffer geometry (ALSA may have adjusted frames). --- */
2366 u->frame_size = frame_size;
2367 u->frames_per_block = pa_mempool_block_size_max(m->core->mempool) / frame_size;
2368 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2369 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2370 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2372 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2373 (double) u->hwbuf_size / (double) u->fragment_size,
2374 (long unsigned) u->fragment_size,
2375 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2376 (long unsigned) u->hwbuf_size,
2377 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2379 pa_sink_set_max_request(u->sink, u->hwbuf_size);
/* Rewinding only works reliably on real hardware PCMs. */
2380 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2381 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2383 pa_log_info("Disabling rewind for device %s", u->device_name);
2384 pa_sink_set_max_rewind(u->sink, 0);
2387 if (u->use_tsched) {
2388 u->tsched_watermark_ref = tsched_watermark;
2389 reset_watermark(u, u->tsched_watermark_ref, &ss, false);
2391 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2395 if (update_sw_params(u) < 0)
2398 if (u->ucm_context) {
2399 if (u->sink->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->sink->active_port, true) < 0)
2401 } else if (setup_mixer(u, ignore_dB) < 0)
2404 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
/* --- Spawn the IO thread. --- */
2406 thread_name = pa_sprintf_malloc("alsa-sink-%s", pa_strnull(pa_proplist_gets(u->sink->proplist, "alsa.id")));
2407 if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
2408 pa_log("Failed to create thread.");
2411 pa_xfree(thread_name);
2414 /* Get initial mixer settings */
2415 if (volume_is_set) {
2416 if (u->sink->set_volume)
2417 u->sink->set_volume(u->sink);
2419 if (u->sink->get_volume)
2420 u->sink->get_volume(u->sink);
2424 if (u->sink->set_mute)
2425 u->sink->set_mute(u->sink);
2427 if (u->sink->get_mute) {
2430 if (u->sink->get_mute(u->sink, &mute) >= 0)
2431 pa_sink_set_mute(u->sink, mute, false);
2435 if ((volume_is_set || mute_is_set) && u->sink->write_volume)
2436 u->sink->write_volume(u->sink);
2439 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2440 pa_format_info *format;
2442 /* To start with, we only support PCM formats. Other formats may be added
2443 * with pa_sink_set_formats().*/
2444 format = pa_format_info_new();
2445 format->encoding = PA_ENCODING_PCM;
2446 u->formats = pa_idxset_new(NULL, NULL);
2447 pa_idxset_put(u->formats, format, NULL);
2449 u->sink->get_formats = sink_get_formats;
2450 u->sink->set_formats = sink_set_formats;
/* Publish the sink — from here on it is visible to clients. */
2453 pa_sink_put(u->sink);
2456 pa_alsa_profile_set_free(profile_set);
/* Error path: free partial state and bail out with NULL. */
2461 pa_xfree(thread_name);
2467 pa_alsa_profile_set_free(profile_set);
/* Tear down a sink's userdata in reverse construction order: unlink the
 * sink, stop the IO thread via PA_MESSAGE_SHUTDOWN, then release ALSA
 * handles, mixer resources and allocated strings. Safe to call on a
 * partially constructed userdata (each member is checked). */
2472 static void userdata_free(struct userdata *u) {
2476 pa_sink_unlink(u->sink);
/* Synchronously ask the IO thread to exit, then join it. */
2479 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2480 pa_thread_free(u->thread);
2483 pa_thread_mq_done(&u->thread_mq);
2486 pa_sink_unref(u->sink);
2488 if (u->memchunk.memblock)
2489 pa_memblock_unref(u->memchunk.memblock);
2492 pa_alsa_mixer_pdata_free(u->mixer_pd);
2494 if (u->alsa_rtpoll_item)
2495 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2498 pa_rtpoll_free(u->rtpoll);
/* Drop any queued audio before closing the PCM. */
2500 if (u->pcm_handle) {
2501 snd_pcm_drop(u->pcm_handle);
2502 snd_pcm_close(u->pcm_handle);
2506 pa_alsa_fdlist_free(u->mixer_fdl);
/* A path owned by a path set is freed with the set, not here. */
2508 if (u->mixer_path && !u->mixer_path_set)
2509 pa_alsa_path_free(u->mixer_path);
2511 if (u->mixer_handle)
2512 snd_mixer_close(u->mixer_handle);
2515 pa_smoother_free(u->smoother);
2518 pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
2526 pa_xfree(u->device_name);
2527 pa_xfree(u->control_device);
2528 pa_xfree(u->paths_dir);
2532 void pa_alsa_sink_free(pa_sink *s) {
2535 pa_sink_assert_ref(s);
2536 pa_assert_se(u = s->userdata);