2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
30 #include <asoundlib.h>
32 #ifdef HAVE_VALGRIND_MEMCHECK_H
33 #include <valgrind/memcheck.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/volume.h>
39 #include <pulse/xmalloc.h>
40 #include <pulse/internal.h>
42 #include <pulsecore/core.h>
43 #include <pulsecore/i18n.h>
44 #include <pulsecore/module.h>
45 #include <pulsecore/memchunk.h>
46 #include <pulsecore/sink.h>
47 #include <pulsecore/modargs.h>
48 #include <pulsecore/core-rtclock.h>
49 #include <pulsecore/core-util.h>
50 #include <pulsecore/sample-util.h>
51 #include <pulsecore/log.h>
52 #include <pulsecore/macro.h>
53 #include <pulsecore/thread.h>
54 #include <pulsecore/thread-mq.h>
55 #include <pulsecore/rtpoll.h>
56 #include <pulsecore/time-smoother.h>
58 #include <modules/reserve-wrap.h>
60 #include "alsa-util.h"
61 #include "alsa-sink.h"
63 /* #define DEBUG_TIMING */
65 #define DEFAULT_DEVICE "default"
67 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
68 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
70 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
71 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
72 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
73 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever below this threshold, increase the watermark */
74 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
76 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
77 * will increase the watermark only if we hit a real underrun. */
79 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
80 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
82 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother windows size */
83 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
85 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
86 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
88 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
90 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
91 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
99 pa_thread_mq thread_mq;
102 snd_pcm_t *pcm_handle;
105 pa_alsa_fdlist *mixer_fdl;
106 pa_alsa_mixer_pdata *mixer_pd;
107 snd_mixer_t *mixer_handle;
108 pa_alsa_path_set *mixer_path_set;
109 pa_alsa_path *mixer_path;
111 pa_cvolume hardware_volume;
120 tsched_watermark_ref,
126 watermark_inc_threshold,
127 watermark_dec_threshold,
130 pa_usec_t watermark_dec_not_before;
131 pa_usec_t min_latency_ref;
133 pa_memchunk memchunk;
135 char *device_name; /* name of the PCM device */
136 char *control_device; /* name of the control device */
138 pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
140 pa_bool_t first, after_rewind;
142 pa_rtpoll_item *alsa_rtpoll_item;
144 pa_smoother *smoother;
145 uint64_t write_count;
146 uint64_t since_start;
147 pa_usec_t smoother_interval;
148 pa_usec_t last_smoother_update;
152 pa_reserve_wrapper *reserve;
153 pa_hook_slot *reserve_slot;
154 pa_reserve_monitor_wrapper *monitor;
155 pa_hook_slot *monitor_slot;
158 static void userdata_free(struct userdata *u);
160 /* FIXME: Is there a better way to do this than device names? */
/* Returns true if the PCM device name begins with "iec958" (S/PDIF digital
 * output). Matching on device-name prefix only — see FIXME above. */
161 static pa_bool_t is_iec958(struct userdata *u) {
162 return (strncmp("iec958", u->device_name, 6) == 0);
/* Returns true if the PCM device name begins with "hdmi". Like is_iec958(),
 * this is a device-name-prefix heuristic. */
165 static pa_bool_t is_hdmi(struct userdata *u) {
166 return (strncmp("hdmi", u->device_name, 4) == 0);
/* Hook callback fired by the device-reservation wrapper when another process
 * wants the device. Suspends the sink; if suspending fails, cancels the hook
 * so the reservation is not handed over.
 * NOTE(review): gaps in the embedded numbering show lines are missing from
 * this dump — verify full body against upstream alsa-sink.c. */
169 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
173 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
174 return PA_HOOK_CANCEL;
/* Tears down the device-reservation state: frees the hook slot (clearing the
 * pointer to guard against double-free) and drops the wrapper reference. */
179 static void reserve_done(struct userdata *u) {
182 if (u->reserve_slot) {
183 pa_hook_slot_free(u->reserve_slot);
184 u->reserve_slot = NULL;
188 pa_reserve_wrapper_unref(u->reserve);
/* Pushes the sink's human-readable description (if any) into the reservation
 * wrapper so other clients see a friendly device name. No-op when either the
 * sink or the reservation is absent. */
193 static void reserve_update(struct userdata *u) {
194 const char *description;
197 if (!u->sink || !u->reserve)
200 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
201 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
/* Acquires a device-reservation wrapper for device `dname` and connects
 * reserve_cb() to its hook. Skipped entirely in system mode.
 * NOTE(review): return-value lines are missing from this dump; presumably
 * returns 0 on success / negative on failure — confirm upstream. */
204 static int reserve_init(struct userdata *u, const char *dname) {
213 if (pa_in_system_mode())
216 if (!(rname = pa_alsa_get_reserve_name(dname)))
219 /* We are resuming, try to lock the device */
220 u->reserve = pa_reserve_wrapper_get(u->core, rname);
228 pa_assert(!u->reserve_slot);
229 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
/* Hook callback from the reservation *monitor*: suspends the sink while some
 * other process holds the device (busy), but only if we don't hold the
 * reservation ourselves. */
234 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
240 b = PA_PTR_TO_UINT(busy) && !u->reserve;
242 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
/* Tears down the reservation monitor: frees the hook slot (clearing the
 * pointer) and drops the monitor-wrapper reference. Mirrors reserve_done(). */
246 static void monitor_done(struct userdata *u) {
249 if (u->monitor_slot) {
250 pa_hook_slot_free(u->monitor_slot);
251 u->monitor_slot = NULL;
255 pa_reserve_monitor_wrapper_unref(u->monitor);
/* Sets up the reservation monitor for device `dname` and connects
 * monitor_cb() to its hook. Skipped in system mode. Mirrors reserve_init(). */
260 static int reserve_monitor_init(struct userdata *u, const char *dname) {
266 if (pa_in_system_mode())
269 if (!(rname = pa_alsa_get_reserve_name(dname)))
272 /* We are resuming, try to lock the device */
273 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
279 pa_assert(!u->monitor_slot);
280 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
/* Recomputes min_sleep/min_wakeup (in bytes) for timer-based scheduling.
 * Both are clamped to [one frame, half of the usable hw buffer], where the
 * usable buffer is hwbuf_size minus the deliberately-unused tail. */
285 static void fix_min_sleep_wakeup(struct userdata *u) {
286 size_t max_use, max_use_2;
289 pa_assert(u->use_tsched);
291 max_use = u->hwbuf_size - u->hwbuf_unused;
292 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
294 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
295 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
297 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
298 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
/* Clamps the tsched watermark into its valid window:
 * at most (usable buffer - min_sleep), at least min_wakeup. */
301 static void fix_tsched_watermark(struct userdata *u) {
304 pa_assert(u->use_tsched);
306 max_use = u->hwbuf_size - u->hwbuf_unused;
308 if (u->tsched_watermark > max_use - u->min_sleep)
309 u->tsched_watermark = max_use - u->min_sleep;
311 if (u->tsched_watermark < u->min_wakeup)
312 u->tsched_watermark = u->min_wakeup;
/* Reacts to an underrun (or near-underrun) by raising the wakeup watermark:
 * doubles it, bounded by watermark_inc_step, then re-clamps. If the watermark
 * cannot grow further, falls back to raising the sink's minimum latency
 * instead — unless a fixed latency range was requested. */
315 static void increase_watermark(struct userdata *u) {
316 size_t old_watermark;
317 pa_usec_t old_min_latency, new_min_latency;
320 pa_assert(u->use_tsched);
322 /* First, just try to increase the watermark */
323 old_watermark = u->tsched_watermark;
324 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
325 fix_tsched_watermark(u);
327 if (old_watermark != u->tsched_watermark) {
328 pa_log_info("Increasing wakeup watermark to %0.2f ms",
329 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
333 /* Hmm, we cannot increase the watermark any further, hence let's
334 raise the latency, unless doing so was disabled in
336 if (u->fixed_latency_range)
339 old_min_latency = u->sink->thread_info.min_latency;
340 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
341 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
343 if (old_min_latency != new_min_latency) {
344 pa_log_info("Increasing minimal latency to %0.2f ms",
345 (double) new_min_latency / PA_USEC_PER_MSEC);
347 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
350 /* When we reach this we're officialy fucked! */
/* Lowers the wakeup watermark when playback has been healthy for long enough.
 * Uses watermark_dec_not_before as a rate limiter: a zero value means "not
 * armed yet"; a future timestamp means "too soon to decrease again". The new
 * watermark is halved but never reduced by more than watermark_dec_step, then
 * re-clamped. The latency range is deliberately left untouched. */
353 static void decrease_watermark(struct userdata *u) {
354 size_t old_watermark;
358 pa_assert(u->use_tsched);
360 now = pa_rtclock_now();
362 if (u->watermark_dec_not_before <= 0)
365 if (u->watermark_dec_not_before > now)
368 old_watermark = u->tsched_watermark;
370 if (u->tsched_watermark < u->watermark_dec_step)
371 u->tsched_watermark = u->tsched_watermark / 2;
373 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
375 fix_tsched_watermark(u);
377 if (old_watermark != u->tsched_watermark)
378 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
379 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
381 /* We don't change the latency range*/
384 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
/* Computes how long the IO thread may sleep (*sleep_usec) and how much time
 * to budget for processing (*process_usec), based on the requested latency
 * (falling back to the full hw buffer length) minus the watermark. Only
 * meaningful under timer-based scheduling. */
387 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
390 pa_assert(sleep_usec);
391 pa_assert(process_usec);
394 pa_assert(u->use_tsched);
396 usec = pa_sink_get_requested_latency_within_thread(u->sink);
398 if (usec == (pa_usec_t) -1)
399 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
401 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
406 *sleep_usec = usec - wm;
410 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
411 (unsigned long) (usec / PA_USEC_PER_MSEC),
412 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
413 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
/* Attempts to recover the PCM from an error `err` reported by ALSA call
 * `call` (e.g. after -EPIPE underrun or -ESTRPIPE system suspend) via
 * snd_pcm_recover(). -EAGAIN must be handled by the caller, never here.
 * Logs and propagates the failure if recovery itself fails. */
417 static int try_recover(struct userdata *u, const char *call, int err) {
422 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
424 pa_assert(err != -EAGAIN);
427 pa_log_debug("%s: Buffer underrun!", call);
429 if (err == -ESTRPIPE)
430 pa_log_debug("%s: System suspended!", call);
432 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
433 pa_log("%s: %s", call, pa_alsa_strerror(err));
/* Given `n_bytes` of free space reported by the device, computes how much
 * audio is still queued (left_to_play), detects underruns, and adapts the
 * watermark: increase on underrun or when below the inc threshold, decrease
 * (only on genuine timeouts) when comfortably above the dec threshold.
 * watermark_dec_not_before is reset unless we are in the "healthy" band. */
442 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
444 pa_bool_t underrun = FALSE;
446 /* We use <= instead of < for this check here because an underrun
447 * only happens after the last sample was processed, not already when
448 * it is removed from the buffer. This is particularly important
449 * when block transfer is used. */
451 if (n_bytes <= u->hwbuf_size)
452 left_to_play = u->hwbuf_size - n_bytes;
455 /* We got a dropout. What a mess! */
463 if (!u->first && !u->after_rewind)
464 if (pa_log_ratelimit(PA_LOG_INFO))
465 pa_log_info("Underrun!");
469 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
470 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
471 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
472 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
476 pa_bool_t reset_not_before = TRUE;
478 if (!u->first && !u->after_rewind) {
479 if (underrun || left_to_play < u->watermark_inc_threshold)
480 increase_watermark(u);
481 else if (left_to_play > u->watermark_dec_threshold) {
482 reset_not_before = FALSE;
484 /* We decrease the watermark only if have actually
485 * been woken up by a timeout. If something else woke
486 * us up it's too easy to fulfill the deadlines... */
489 decrease_watermark(u);
493 if (reset_not_before)
494 u->watermark_dec_not_before = 0;
/* mmap-based write path: fills the ALSA ring buffer directly via
 * snd_pcm_mmap_begin()/commit(), rendering sink audio straight into the
 * mapped area (zero-copy through a fixed memblock). Queries available space,
 * lets check_left_to_play() adapt the watermark, honors hwbuf_unused, and on
 * exit computes the next sleep time from what is still queued minus the
 * watermark-sized processing budget. Returns 1 if any audio was written,
 * 0 otherwise (error paths aside).
 * NOTE(review): this dump is missing interior lines (loop headers, error
 * returns, goto labels per the numbering gaps) — verify against upstream. */
500 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
501 pa_bool_t work_done = FALSE;
502 pa_usec_t max_sleep_usec = 0, process_usec = 0;
507 pa_sink_assert_ref(u->sink);
510 hw_sleep_time(u, &max_sleep_usec, &process_usec);
516 pa_bool_t after_avail = TRUE;
518 /* First we determine how many samples are missing to fill the
519 * buffer up to 100% */
521 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
523 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
529 n_bytes = (size_t) n * u->frame_size;
532 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
535 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
540 /* We won't fill up the playback buffer before at least
541 * half the sleep time is over because otherwise we might
542 * ask for more data from the clients then they expect. We
543 * need to guarantee that clients only have to keep around
544 * a single hw buffer length. */
547 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
549 pa_log_debug("Not filling up, because too early.");
554 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
558 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
559 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
560 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
561 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
567 pa_log_debug("Not filling up, because not necessary.");
575 pa_log_debug("Not filling up, because already too many iterations.");
581 n_bytes -= u->hwbuf_unused;
585 pa_log_debug("Filling up");
592 const snd_pcm_channel_area_t *areas;
593 snd_pcm_uframes_t offset, frames;
594 snd_pcm_sframes_t sframes;
596 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
597 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
599 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
601 if (!after_avail && err == -EAGAIN)
604 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
610 /* Make sure that if these memblocks need to be copied they will fit into one slot */
611 if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
612 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
614 if (!after_avail && frames == 0)
617 pa_assert(frames > 0);
620 /* Check these are multiples of 8 bit */
621 pa_assert((areas[0].first & 7) == 0);
622 pa_assert((areas[0].step & 7)== 0);
624 /* We assume a single interleaved memory buffer */
625 pa_assert((areas[0].first >> 3) == 0);
626 pa_assert((areas[0].step >> 3) == u->frame_size);
628 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
630 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
631 chunk.length = pa_memblock_get_length(chunk.memblock);
634 pa_sink_render_into_full(u->sink, &chunk);
635 pa_memblock_unref_fixed(chunk.memblock);
637 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
639 if (!after_avail && (int) sframes == -EAGAIN)
642 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
650 u->write_count += frames * u->frame_size;
651 u->since_start += frames * u->frame_size;
654 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
657 if ((size_t) frames * u->frame_size >= n_bytes)
660 n_bytes -= (size_t) frames * u->frame_size;
665 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
666 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
668 if (*sleep_usec > process_usec)
669 *sleep_usec -= process_usec;
675 return work_done ? 1 : 0;
/* snd_pcm_writei()-based write path (non-mmap fallback): renders into a
 * staged memchunk, writes it to the device, and advances the chunk's
 * index/length, freeing it once fully consumed. Shares the avail query,
 * watermark adaptation, and sleep-time computation with mmap_write().
 * Returns 1 if any audio was written, 0 otherwise (error paths aside).
 * NOTE(review): interior lines are missing from this dump per the numbering
 * gaps — verify against upstream before modifying. */
678 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
679 pa_bool_t work_done = FALSE;
680 pa_usec_t max_sleep_usec = 0, process_usec = 0;
685 pa_sink_assert_ref(u->sink);
688 hw_sleep_time(u, &max_sleep_usec, &process_usec);
694 pa_bool_t after_avail = TRUE;
696 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
698 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
704 n_bytes = (size_t) n * u->frame_size;
705 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
710 /* We won't fill up the playback buffer before at least
711 * half the sleep time is over because otherwise we might
712 * ask for more data from the clients then they expect. We
713 * need to guarantee that clients only have to keep around
714 * a single hw buffer length. */
717 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
720 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
724 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
725 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
726 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
727 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
737 pa_log_debug("Not filling up, because already too many iterations.");
743 n_bytes -= u->hwbuf_unused;
747 snd_pcm_sframes_t frames;
750 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
752 if (u->memchunk.length <= 0)
753 pa_sink_render(u->sink, n_bytes, &u->memchunk);
755 pa_assert(u->memchunk.length > 0);
757 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
759 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
760 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
762 p = pa_memblock_acquire(u->memchunk.memblock);
763 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
764 pa_memblock_release(u->memchunk.memblock);
766 if (PA_UNLIKELY(frames < 0)) {
768 if (!after_avail && (int) frames == -EAGAIN)
771 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
777 if (!after_avail && frames == 0)
780 pa_assert(frames > 0);
783 u->memchunk.index += (size_t) frames * u->frame_size;
784 u->memchunk.length -= (size_t) frames * u->frame_size;
786 if (u->memchunk.length <= 0) {
787 pa_memblock_unref(u->memchunk.memblock);
788 pa_memchunk_reset(&u->memchunk);
793 u->write_count += frames * u->frame_size;
794 u->since_start += frames * u->frame_size;
796 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
798 if ((size_t) frames * u->frame_size >= n_bytes)
801 n_bytes -= (size_t) frames * u->frame_size;
806 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
807 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
809 if (*sleep_usec > process_usec)
810 *sleep_usec -= process_usec;
816 return work_done ? 1 : 0;
819 static void update_smoother(struct userdata *u) {
820 snd_pcm_sframes_t delay = 0;
823 pa_usec_t now1 = 0, now2;
824 snd_pcm_status_t *status;
826 snd_pcm_status_alloca(&status);
829 pa_assert(u->pcm_handle);
831 /* Let's update the time smoother */
833 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
834 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
838 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
839 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
841 snd_htimestamp_t htstamp = { 0, 0 };
842 snd_pcm_status_get_htstamp(status, &htstamp);
843 now1 = pa_timespec_load(&htstamp);
846 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
848 now1 = pa_rtclock_now();
850 /* check if the time since the last update is bigger than the interval */
851 if (u->last_smoother_update > 0)
852 if (u->last_smoother_update + u->smoother_interval > now1)
855 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
857 if (PA_UNLIKELY(position < 0))
860 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
862 pa_smoother_put(u->smoother, now1, now2);
864 u->last_smoother_update = now1;
865 /* exponentially increase the update interval up to the MAX limit */
866 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
/* Estimated sink latency: bytes written minus the smoother's estimate of
 * what has actually been played, clamped to >= 0, plus whatever is still
 * staged in the unix-write memchunk. */
869 static pa_usec_t sink_get_latency(struct userdata *u) {
872 pa_usec_t now1, now2;
876 now1 = pa_rtclock_now();
877 now2 = pa_smoother_get(u->smoother, now1);
879 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
881 r = delay >= 0 ? (pa_usec_t) delay : 0;
883 if (u->memchunk.memblock)
884 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
/* (Re)creates the rtpoll item for the PCM's poll fds, freeing any previous
 * one first. Fails if pa_alsa_build_pollfd() cannot build the item. */
889 static int build_pollfd(struct userdata *u) {
891 pa_assert(u->pcm_handle);
893 if (u->alsa_rtpoll_item)
894 pa_rtpoll_item_free(u->alsa_rtpoll_item);
896 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
902 /* Called from IO context */
/* Suspends the device: pauses the smoother, closes the PCM (without draining
 * — see comment below), frees the poll item, and zeroes max_rewind /
 * max_request so stale values can't leak into streams created while
 * suspended. */
903 static int suspend(struct userdata *u) {
905 pa_assert(u->pcm_handle);
907 pa_smoother_pause(u->smoother, pa_rtclock_now());
909 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
910 * take awfully long with our long buffer sizes today. */
911 snd_pcm_close(u->pcm_handle);
912 u->pcm_handle = NULL;
914 if (u->alsa_rtpoll_item) {
915 pa_rtpoll_item_free(u->alsa_rtpoll_item);
916 u->alsa_rtpoll_item = NULL;
919 /* We reset max_rewind/max_request here to make sure that while we
920 * are suspended the old max_request/max_rewind values set before
921 * the suspend can influence the per-stream buffer of newly
922 * created streams, without their requirements having any
923 * influence on them. */
924 pa_sink_set_max_rewind_within_thread(u->sink, 0);
925 pa_sink_set_max_request_within_thread(u->sink, 0);
927 pa_log_info("Device suspended...");
932 /* Called from IO context */
/* Recomputes software parameters from the currently requested latency:
 * derives hwbuf_unused (buffer tail we deliberately leave empty), re-fixes
 * sleep/watermark values, sets ALSA's avail_min, and updates the sink's
 * max_request / max_rewind (rewind only for real hw devices). */
933 static int update_sw_params(struct userdata *u) {
934 snd_pcm_uframes_t avail_min;
939 /* Use the full buffer if no one asked us for anything specific */
945 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
948 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
950 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
952 /* We need at least one sample in our buffer */
954 if (PA_UNLIKELY(b < u->frame_size))
957 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
960 fix_min_sleep_wakeup(u);
961 fix_tsched_watermark(u);
964 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
966 /* We need at last one frame in the used part of the buffer */
967 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
970 pa_usec_t sleep_usec, process_usec;
972 hw_sleep_time(u, &sleep_usec, &process_usec);
973 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
976 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
978 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
979 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
983 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
984 if (pa_alsa_pcm_is_hw(u->pcm_handle))
985 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
987 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
988 pa_sink_set_max_rewind_within_thread(u->sink, 0);
994 /* Called from IO Context on unsuspend or from main thread when creating sink */
/* Re-derives all watermark-related byte values from the current sample spec
 * (round-tripping through usec so they stay correct if the spec changed),
 * then re-clamps and publishes the sink latency range — using the
 * within-thread variant when called from IO context. min_latency_ref caches
 * min_latency for reuse on later IO-context calls (see comment below). */
995 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
998 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
999 &u->sink->sample_spec);
1001 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1002 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1004 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1005 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1007 fix_min_sleep_wakeup(u);
1008 fix_tsched_watermark(u);
1011 pa_sink_set_latency_range_within_thread(u->sink,
1013 pa_bytes_to_usec(u->hwbuf_size, ss));
1015 pa_sink_set_latency_range(u->sink,
1017 pa_bytes_to_usec(u->hwbuf_size, ss));
1019 /* work-around assert in pa_sink_set_latency_within_thead,
1020 keep track of min_latency and reuse it when
1021 this routine is called from IO context */
1022 u->min_latency_ref = u->sink->thread_info.min_latency;
1025 pa_log_info("Time scheduling watermark is %0.2fms",
1026 (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
1029 /* Called from IO context */
/* Resumes a suspended device: reopens the PCM (appending ",AES0=6" for
 * IEC958/HDMI passthrough to enable NONAUDIO mode), restores hw params and
 * verifies they match the pre-suspend access mode, sample spec, and
 * fragment/buffer geometry, then rebuilds sw params, poll fds, smoother
 * state, and the watermark. On failure, closes the PCM and frees the
 * temporary device name (see cleanup at the bottom). */
1030 static int unsuspend(struct userdata *u) {
1034 snd_pcm_uframes_t period_size, buffer_size;
1035 char *device_name = NULL;
1038 pa_assert(!u->pcm_handle);
1040 pa_log_info("Trying resume...");
1042 if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
1043 /* Need to open device in NONAUDIO mode */
1044 int len = strlen(u->device_name) + 8;
1046 device_name = pa_xmalloc(len);
1047 pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
1050 if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
1052 SND_PCM_NO_AUTO_RESAMPLE|
1053 SND_PCM_NO_AUTO_CHANNELS|
1054 SND_PCM_NO_AUTO_FORMAT)) < 0) {
1055 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
1059 ss = u->sink->sample_spec;
1060 period_size = u->fragment_size / u->frame_size;
1061 buffer_size = u->hwbuf_size / u->frame_size;
1065 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
1066 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1070 if (b != u->use_mmap || d != u->use_tsched) {
1071 pa_log_warn("Resume failed, couldn't get original access mode.");
1075 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1076 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1080 if (period_size*u->frame_size != u->fragment_size ||
1081 buffer_size*u->frame_size != u->hwbuf_size) {
1082 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1083 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1084 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1088 if (update_sw_params(u) < 0)
1091 if (build_pollfd(u) < 0)
1095 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1096 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1097 u->last_smoother_update = 0;
1102 /* reset the watermark to the value defined when sink was created */
1104 reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);
1106 pa_log_info("Resumed successfully...");
1108 pa_xfree(device_name);
1112 if (u->pcm_handle) {
1113 snd_pcm_close(u->pcm_handle);
1114 u->pcm_handle = NULL;
1117 pa_xfree(device_name);
1122 /* Called from IO context */
/* IO-thread message handler for the sink: answers GET_LATENCY from
 * sink_get_latency(), and on SET_STATE performs suspend() / build_pollfd() /
 * unsuspend() transitions as appropriate. Everything else is delegated to
 * the generic pa_sink_process_msg(). */
1123 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1124 struct userdata *u = PA_SINK(o)->userdata;
1128 case PA_SINK_MESSAGE_GET_LATENCY: {
1132 r = sink_get_latency(u);
1134 *((pa_usec_t*) data) = r;
1139 case PA_SINK_MESSAGE_SET_STATE:
1141 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1143 case PA_SINK_SUSPENDED: {
1146 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1148 if ((r = suspend(u)) < 0)
1155 case PA_SINK_RUNNING: {
1158 if (u->sink->thread_info.state == PA_SINK_INIT) {
1159 if (build_pollfd(u) < 0)
1163 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1164 if ((r = unsuspend(u)) < 0)
1171 case PA_SINK_UNLINKED:
1173 case PA_SINK_INVALID_STATE:
1180 return pa_sink_process_msg(o, code, data, offset, chunk);
1183 /* Called from main context */
/* Main-thread state-change hook: manages the device reservation across
 * suspend/resume — re-acquiring it on resume and returning -PA_ERR_BUSY if
 * the device cannot be locked. */
1184 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1185 pa_sink_state_t old_state;
1188 pa_sink_assert_ref(s);
1189 pa_assert_se(u = s->userdata);
1191 old_state = pa_sink_get_state(u->sink);
1193 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1195 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1196 if (reserve_init(u, u->device_name) < 0)
1197 return -PA_ERR_BUSY;
/* Mixer-element callback (main-thread/ctl variant): on external volume/mute
 * changes, re-reads hardware volume and mute into the sink. While the
 * session is suspended it only marks the mixer dirty for later resync. */
1202 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1203 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1206 pa_assert(u->mixer_handle);
1208 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1211 if (!PA_SINK_IS_LINKED(u->sink->state))
1214 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1215 pa_sink_set_mixer_dirty(u->sink, TRUE);
1219 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1220 pa_sink_get_volume(u->sink, TRUE);
1221 pa_sink_get_mute(u->sink, TRUE);
/* Mixer-element callback (IO-thread variant, used with deferred volume):
 * triggers pa_sink_update_volume_and_mute() on value changes; marks the
 * mixer dirty instead while the session is suspended. */
1227 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1228 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1231 pa_assert(u->mixer_handle);
1233 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1236 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1237 pa_sink_set_mixer_dirty(u->sink, TRUE);
1241 if (mask & SND_CTL_EVENT_MASK_VALUE)
1242 pa_sink_update_volume_and_mute(u->sink);
/* Reads the hardware volume from the mixer path, rescales it by the base
 * volume (so 0dB maps to maximum), and — if it changed — adopts it as the
 * sink's real volume. For dB-capable paths the software volume is reset,
 * since the hardware now reflects the user's setting. */
1247 static void sink_get_volume_cb(pa_sink *s) {
1248 struct userdata *u = s->userdata;
1250 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1253 pa_assert(u->mixer_path);
1254 pa_assert(u->mixer_handle);
1256 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1259 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1260 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1262 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1264 if (u->mixer_path->has_dB) {
1265 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1267 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1270 if (pa_cvolume_equal(&u->hardware_volume, &r))
1273 s->real_volume = u->hardware_volume = r;
1275 /* Hmm, so the hardware volume changed, let's reset our software volume */
1276 if (u->mixer_path->has_dB)
1277 pa_sink_set_soft_volume(s, NULL);
/* Writes the requested volume to hardware (scaled up by base volume), reads
 * back what the hardware actually accepted, and — for dB-capable paths —
 * computes the residual software volume to make up the difference. The
 * residual is skipped when it's within VOLUME_ACCURACY of unity, trading a
 * tiny volume error for avoiding software attenuation. */
1280 static void sink_set_volume_cb(pa_sink *s) {
1281 struct userdata *u = s->userdata;
1283 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1284 pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);
1287 pa_assert(u->mixer_path);
1288 pa_assert(u->mixer_handle);
1290 /* Shift up by the base volume */
1291 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1293 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1296 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1297 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1299 u->hardware_volume = r;
1301 if (u->mixer_path->has_dB) {
1302 pa_cvolume new_soft_volume;
1303 pa_bool_t accurate_enough;
1304 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1306 /* Match exactly what the user requested by software */
1307 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1309 /* If the adjustment to do in software is only minimal we
1310 * can skip it. That saves us CPU at the expense of a bit of
1313 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1314 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1316 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1317 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1318 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1319 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1320 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1321 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1322 pa_yes_no(accurate_enough));
1323 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1325 if (!accurate_enough)
1326 s->soft_volume = new_soft_volume;
1329 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1331 /* We can't match exactly what the user requested, hence let's
1332 * at least tell the user about it */
/* Deferred-volume write callback (IO-thread side): apply the pending
 * thread_info.current_hw_volume to the hardware, then read back and warn
 * if the value the hardware accepted differs beyond VOLUME_ACCURACY.
 * NOTE(review): elided extract — `tmp_vol` declaration, the `union {...} vol`
 * wrapper around the db[]/pcnt[] buffers (referenced below as vol.db/vol.pcnt),
 * and closing braces are on lines missing from this view. */
1338 static void sink_write_volume_cb(pa_sink *s) {
1339 struct userdata *u = s->userdata;
1340 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1343 pa_assert(u->mixer_path);
1344 pa_assert(u->mixer_handle);
1345 pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);
1347 /* Shift up by the base volume */
1348 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1350 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1351 pa_log_error("Writing HW volume failed");
1354 pa_bool_t accurate_enough;
1356 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1357 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1359 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1361 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1362 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1364 if (!accurate_enough) {
1366 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1367 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1370 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1371 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1372 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol),
1373 pa_log_debug(" in dB: %s (request) != %s",
1374 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1375 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
/* Mute "get" callback: read the hardware mute state from the mixer path.
 * NOTE(review): elided extract — declaration of `b`, the early return and
 * the propagation of `b` into s->muted are on lines missing from this view. */
1380 static void sink_get_mute_cb(pa_sink *s) {
1381 struct userdata *u = s->userdata;
1385 pa_assert(u->mixer_path);
1386 pa_assert(u->mixer_handle);
1388 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
/* Mute "set" callback: push s->muted down to the ALSA mixer path.
 * NOTE(review): elided extract — closing brace is on a line missing
 * from this view. */
1394 static void sink_set_mute_cb(pa_sink *s) {
1395 struct userdata *u = s->userdata;
1398 pa_assert(u->mixer_path);
1399 pa_assert(u->mixer_handle);
1401 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
/* Wire the sink's volume/mute callbacks according to the capabilities of the
 * currently selected mixer path: hardware volume (with optional dB scale and
 * deferred-volume write callback) or fall back to software volume; same for
 * mute.  Also derives base_volume / n_volume_steps from the path's range.
 * NOTE(review): elided extract — pa_assert(u), else-branch braces and some
 * closing braces are on lines missing from this view. */
1404 static void mixer_volume_init(struct userdata *u) {
1407 if (!u->mixer_path->has_volume) {
1408 pa_sink_set_write_volume_callback(u->sink, NULL);
1409 pa_sink_set_get_volume_callback(u->sink, NULL);
1410 pa_sink_set_set_volume_callback(u->sink, NULL);
1412 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1414 pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
1415 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
/* Deferred (IO-thread) HW volume writes only make sense with a dB scale. */
1417 if (u->mixer_path->has_dB && u->deferred_volume) {
1418 pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
1419 pa_log_info("Successfully enabled deferred volume.");
1421 pa_sink_set_write_volume_callback(u->sink, NULL);
1423 if (u->mixer_path->has_dB) {
1424 pa_sink_enable_decibel_volume(u->sink, TRUE);
1425 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
/* base_volume maps the path's max_dB onto 0 dB for the user. */
1427 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1428 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1430 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1432 pa_sink_enable_decibel_volume(u->sink, FALSE);
1433 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1435 u->sink->base_volume = PA_VOLUME_NORM;
1436 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1439 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1442 if (!u->mixer_path->has_mute) {
1443 pa_sink_set_get_mute_callback(u->sink, NULL);
1444 pa_sink_set_set_mute_callback(u->sink, NULL);
1445 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1447 pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
1448 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1449 pa_log_info("Using hardware mute control.");
/* Port-switch callback: select the mixer path attached to the new port,
 * re-initialize the volume/mute callbacks for that path, and apply the
 * port's setting if present.
 * NOTE(review): elided extract — asserts, the setting-NULL guard, the
 * deferred-volume sync body after s->write_volume, and the return statement
 * are on lines missing from this view. */
1453 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1454 struct userdata *u = s->userdata;
1455 pa_alsa_port_data *data;
1459 pa_assert(u->mixer_handle);
1461 data = PA_DEVICE_PORT_DATA(p);
1463 pa_assert_se(u->mixer_path = data->path);
1464 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1466 mixer_volume_init(u);
1469 pa_alsa_setting_select(data->setting, u->mixer_handle);
1473 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
1474 if (s->write_volume)
/* Requested-latency change callback (timer-scheduling only): recompute the
 * software parameters and, if less of the hardware buffer is now in use,
 * force a full rewind so subsequent rewinds are relative to the new fill
 * level.
 * NOTE(review): elided extract — declaration of `before` and the early
 * return when the PCM is not yet open are on lines missing from this view. */
1484 static void sink_update_requested_latency_cb(pa_sink *s) {
1485 struct userdata *u = s->userdata;
1488 pa_assert(u->use_tsched); /* only when timer scheduling is used
1489 * we can dynamically adjust the
1495 before = u->hwbuf_unused;
1496 update_sw_params(u);
1498 /* Let's check whether we now use only a smaller part of the
1499 buffer then before. If so, we need to make sure that subsequent
1500 rewinds are relative to the new maximum fill level and not to the
1501 current fill level. Thus, let's do a full rewind once, to clear
1504 if (u->hwbuf_unused > before) {
1505 pa_log_debug("Requesting rewind due to latency change.");
1506 pa_sink_request_rewind(s, (size_t) -1);
/* Return a freshly allocated idxset containing copies of the formats the
 * sink currently supports (caller owns the returned set).
 * NOTE(review): elided extract — declarations of `f`/`idx` and the return
 * statement are on lines missing from this view. */
1510 static pa_idxset* sink_get_formats(pa_sink *s) {
1511 struct userdata *u = s->userdata;
1512 pa_idxset *ret = pa_idxset_new(NULL, NULL);
1518 PA_IDXSET_FOREACH(f, u->formats, idx) {
1519 pa_idxset_put(ret, pa_format_info_copy(f), NULL);
/* Replace the sink's supported-format set.  Rejects EAC3-over-IEC61937 on
 * S/PDIF, attaches the device's supported sample-rate list to non-PCM
 * formats (preferred, inserted first), and adds PCM formats without rate
 * restrictions since unsupported rates get a resampler.
 * NOTE(review): elided extract — declarations of `idx`/`n`, the FALSE
 * return for the rejected encoding, the loop-body counting statement and
 * the final return are on lines missing from this view. */
1525 static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
1526 struct userdata *u = s->userdata;
1527 pa_format_info *f, *g;
1532 /* FIXME: also validate sample rates against what the device supports */
1533 PA_IDXSET_FOREACH(f, formats, idx) {
1534 if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
1535 /* EAC3 cannot be sent over over S/PDIF */
1539 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
1540 u->formats = pa_idxset_new(NULL, NULL);
1542 /* Note: the logic below won't apply if we're using software encoding.
1543 * This is fine for now since we don't support that via the passthrough
1544 * framework, but this must be changed if we do. */
1546 /* Count how many sample rates we support */
1547 for (idx = 0, n = 0; u->rates[idx]; idx++)
1550 /* First insert non-PCM formats since we prefer those. */
1551 PA_IDXSET_FOREACH(f, formats, idx) {
1552 if (!pa_format_info_is_pcm(f)) {
1553 g = pa_format_info_copy(f);
1554 pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
1555 pa_idxset_put(u->formats, g, NULL);
1559 /* Now add any PCM formats */
1560 PA_IDXSET_FOREACH(f, formats, idx) {
1561 if (pa_format_info_is_pcm(f)) {
1562 /* We don't set rates here since we'll just tack on a resampler for
1563 * unsupported rates */
1564 pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
/* Sample-rate change callback: accept the new rate only if it is in the
 * device's supported-rate list (u->rates) and the sink is not currently
 * opened; then store it into the sink's sample spec.
 * NOTE(review): elided extract — declaration of `i`, setting of
 * `supported`, and the TRUE/FALSE returns are on lines missing from
 * this view. */
1571 static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate)
1573 struct userdata *u = s->userdata;
1575 pa_bool_t supported = FALSE;
1579 for (i = 0; u->rates[i]; i++) {
1580 if (u->rates[i] == rate) {
1587 pa_log_info("Sink does not support sample rate of %d Hz", rate);
1591 if (!PA_SINK_IS_OPENED(s->state)) {
1592 pa_log_info("Updating rate for device %s, new rate is %d",u->device_name, rate);
1593 u->sink->sample_spec.rate = rate;
/* Perform a rewind requested by the sink core: clamp the requested byte
 * count so we never rewind into data already in flight (ALSA avail +
 * rewind_safeguard), call snd_pcm_rewind(), and report the actually
 * rewound amount back via pa_sink_process_rewind().
 * Returns 0 on success, negative on unrecoverable PCM error (per the
 * visible error paths; return statements are on elided lines).
 * NOTE(review): elided extract — pa_assert(u), the limit_nbytes else
 * branch, the try_recover success path and returns are missing from
 * this view. */
1600 static int process_rewind(struct userdata *u) {
1601 snd_pcm_sframes_t unused;
1602 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1605 /* Figure out how much we shall rewind and reset the counter */
1606 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1608 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1610 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1611 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1615 unused_nbytes = (size_t) unused * u->frame_size;
1617 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1618 unused_nbytes += u->rewind_safeguard;
1620 if (u->hwbuf_size > unused_nbytes)
1621 limit_nbytes = u->hwbuf_size - unused_nbytes;
1625 if (rewind_nbytes > limit_nbytes)
1626 rewind_nbytes = limit_nbytes;
1628 if (rewind_nbytes > 0) {
1629 snd_pcm_sframes_t in_frames, out_frames;
1631 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1633 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1634 pa_log_debug("before: %lu", (unsigned long) in_frames);
1635 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1636 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1637 if (try_recover(u, "process_rewind", out_frames) < 0)
1642 pa_log_debug("after: %lu", (unsigned long) out_frames);
1644 rewind_nbytes = (size_t) out_frames * u->frame_size;
1646 if (rewind_nbytes <= 0)
1647 pa_log_info("Tried rewind, but was apparently not possible.");
1649 u->write_count -= rewind_nbytes;
1650 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1651 pa_sink_process_rewind(u->sink, rewind_nbytes);
1653 u->after_rewind = TRUE;
1657 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1659 pa_sink_process_rewind(u->sink, 0);
/* IO-thread main loop: renders audio into the ALSA device (mmap or unix
 * write), handles rewinds, computes the next wakeup (timer-based scheduling
 * with smoother-translated sound-card time), applies deferred volume
 * changes, sleeps in pa_rtpoll_run(), and processes ALSA poll revents
 * (including recovery on error conditions).  On abnormal exit it asks the
 * core to unload the module and waits for PA_MESSAGE_SHUTDOWN.
 * NOTE(review): elided extract — the `for (;;)` loop head, `work_done`/
 * `ret`/`err`/`n`/`cusec` declarations, several braces and `goto fail`
 * paths are on lines missing from this view. */
1663 static void thread_func(void *userdata) {
1664 struct userdata *u = userdata;
1665 unsigned short revents = 0;
1669 pa_log_debug("Thread starting up");
1671 if (u->core->realtime_scheduling)
1672 pa_make_realtime(u->core->realtime_priority);
1674 pa_thread_mq_install(&u->thread_mq);
1678 pa_usec_t rtpoll_sleep = 0;
1681 pa_log_debug("Loop");
1684 /* Render some data and write it to the dsp */
1685 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1687 pa_usec_t sleep_usec = 0;
1688 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1690 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1691 if (process_rewind(u) < 0)
1695 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1697 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1702 /* pa_log_debug("work_done = %i", work_done); */
1707 pa_log_info("Starting playback.");
1708 snd_pcm_start(u->pcm_handle);
1710 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1718 if (u->use_tsched) {
1721 if (u->since_start <= u->hwbuf_size) {
1723 /* USB devices on ALSA seem to hit a buffer
1724 * underrun during the first iterations much
1725 * quicker then we calculate here, probably due to
1726 * the transport latency. To accommodate for that
1727 * we artificially decrease the sleep time until
1728 * we have filled the buffer at least once
1731 if (pa_log_ratelimit(PA_LOG_DEBUG))
1732 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1736 /* OK, the playback buffer is now full, let's
1737 * calculate when to wake up next */
1739 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
1742 /* Convert from the sound card time domain to the
1743 * system time domain */
1744 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1747 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
1750 /* We don't trust the conversion, so we wake up whatever comes first */
1751 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1754 u->after_rewind = FALSE;
1758 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1759 pa_usec_t volume_sleep;
1760 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1761 if (volume_sleep > 0) {
1762 if (rtpoll_sleep > 0)
1763 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1765 rtpoll_sleep = volume_sleep;
1769 if (rtpoll_sleep > 0)
1770 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1772 pa_rtpoll_set_timer_disabled(u->rtpoll);
1774 /* Hmm, nothing to do. Let's sleep */
1775 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1778 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
1779 pa_sink_volume_change_apply(u->sink, NULL);
1784 /* Tell ALSA about this and process its response */
1785 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1786 struct pollfd *pollfd;
1790 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1792 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1793 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1797 if (revents & ~POLLOUT) {
1798 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1804 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1805 pa_log_debug("Wakeup from ALSA!");
1812 /* If this was no regular exit from the loop we have to continue
1813 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1814 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1815 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1818 pa_log_debug("Thread shutting down");
/* Choose and register the sink name: prefer the "sink_name" modarg, then a
 * generated "alsa_output.<device>[.<mapping>]" name from device_id/device
 * name (with namereg_fail=FALSE for generated names so collisions get a
 * suffix instead of failing).
 * NOTE(review): elided extract — declarations of `n`/`t`, early returns,
 * the pa_xfree(t) and description-setting tail are on lines missing from
 * this view. */
1821 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1827 pa_assert(device_name);
1829 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1830 pa_sink_new_data_set_name(data, n);
1831 data->namereg_fail = TRUE;
1835 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1836 data->namereg_fail = TRUE;
1838 n = device_id ? device_id : device_name;
1839 data->namereg_fail = FALSE;
1843 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1845 t = pa_sprintf_malloc("alsa_output.%s", n);
1847 pa_sink_new_data_set_name(data, t);
/* Locate and probe a mixer for the PCM: open the mixer device, then either
 * synthesize a path for an explicit "control" element or take the mapping's
 * output path set.  On probe failure, tear down the path and mixer handle.
 * NOTE(review): elided extract — the `hctl` declaration, early returns,
 * the `goto fail` edges and the `fail:` label are on lines missing from
 * this view; the cleanup code at the bottom is the fail path. */
1851 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1854 if (!mapping && !element)
1857 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1858 pa_log_info("Failed to find a working mixer device.");
1864 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1867 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1870 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1871 pa_alsa_path_dump(u->mixer_path);
1872 } else if (!(u->mixer_path_set = mapping->output_path_set))
/* Failure cleanup: release whatever was set up before the error. */
1879 if (u->mixer_path) {
1880 pa_alsa_path_free(u->mixer_path);
1881 u->mixer_path = NULL;
1884 if (u->mixer_handle) {
1885 snd_mixer_close(u->mixer_handle);
1886 u->mixer_handle = NULL;
/* Activate the chosen mixer path (active port's path, or the single/first
 * path available), initialize volume/mute callbacks, and register mixer
 * event callbacks: rtpoll-based (io_mixer_callback) for deferred volume,
 * otherwise mainloop fdlist-based (ctl_mixer_callback).
 * Returns 0 on success, negative on monitoring-setup failure (returns are
 * on elided lines).
 * NOTE(review): elided extract — pa_assert(u), the setting-NULL guard,
 * `p`/`state` declarations and several braces are missing from this view. */
1891 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1892 pa_bool_t need_mixer_callback = FALSE;
1896 if (!u->mixer_handle)
1899 if (u->sink->active_port) {
1900 pa_alsa_port_data *data;
1902 /* We have a list of supported paths, so let's activate the
1903 * one that has been chosen as active */
1905 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1906 u->mixer_path = data->path;
1908 pa_alsa_path_select(data->path, u->mixer_handle);
1911 pa_alsa_setting_select(data->setting, u->mixer_handle);
1915 if (!u->mixer_path && u->mixer_path_set)
1916 u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1918 if (u->mixer_path) {
1919 /* Hmm, we have only a single path, then let's activate it */
1921 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1923 if (u->mixer_path->settings)
1924 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1929 mixer_volume_init(u);
1931 /* Will we need to register callbacks? */
1932 if (u->mixer_path_set && u->mixer_path_set->paths) {
1936 PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1937 if (p->has_volume || p->has_mute)
1938 need_mixer_callback = TRUE;
1941 else if (u->mixer_path)
1942 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1944 if (need_mixer_callback) {
1945 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1946 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1947 u->mixer_pd = pa_alsa_mixer_pdata_new();
1948 mixer_callback = io_mixer_callback;
1950 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1951 pa_log("Failed to initialize file descriptor monitoring");
1955 u->mixer_fdl = pa_alsa_fdlist_new();
1956 mixer_callback = ctl_mixer_callback;
1958 if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1959 pa_log("Failed to initialize file descriptor monitoring");
1964 if (u->mixer_path_set)
1965 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1967 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
/* Module entry point: create and fully initialize an ALSA sink.
 * Parses modargs (sample spec, buffer metrics, mmap/tsched/dB/deferred-volume
 * options), opens the PCM (by mapping, by device_id auto-profile, or by
 * device string), probes the mixer, creates the pa_sink, wires callbacks,
 * starts the IO thread, applies initial volume/mute, and for S/PDIF/HDMI
 * enables the custom-format API.  Returns the new sink, or NULL on failure
 * (return statements and the `fail:` label are on elided lines).
 * NOTE(review): elided extract — declarations of ss/map/frame_size, asserts,
 * many `goto fail` edges, closing braces and both return paths are missing
 * from this view; code below is kept verbatim. */
1973 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1975 struct userdata *u = NULL;
1976 const char *dev_id = NULL;
1978 uint32_t alternate_sample_rate;
1980 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
1981 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1983 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE, fixed_latency_range = FALSE;
1984 pa_sink_new_data data;
1985 pa_alsa_profile_set *profile_set = NULL;
/* --- Parse module arguments --- */
1990 ss = m->core->default_sample_spec;
1991 map = m->core->default_channel_map;
1992 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1993 pa_log("Failed to parse sample specification and channel map");
1997 alternate_sample_rate = m->core->alternate_sample_rate;
1998 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1999 pa_log("Failed to parse alternate sample rate");
2003 frame_size = pa_frame_size(&ss);
2005 nfrags = m->core->default_n_fragments;
2006 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
2008 frag_size = (uint32_t) frame_size;
2009 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2010 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
2012 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2013 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2014 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2015 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2016 pa_log("Failed to parse buffer metrics");
2020 buffer_size = nfrags * frag_size;
2022 period_frames = frag_size/frame_size;
2023 buffer_frames = buffer_size/frame_size;
2024 tsched_frames = tsched_size/frame_size;
2026 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2027 pa_log("Failed to parse mmap argument.");
2031 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2032 pa_log("Failed to parse tsched argument.");
2036 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2037 pa_log("Failed to parse ignore_dB argument.");
2041 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2042 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2043 pa_log("Failed to parse rewind_safeguard argument");
2047 deferred_volume = m->core->deferred_volume;
2048 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2049 pa_log("Failed to parse deferred_volume argument.");
2053 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2054 pa_log("Failed to parse fixed_latency_range argument.");
2058 use_tsched = pa_alsa_may_tsched(use_tsched);
/* --- Allocate and initialize userdata --- */
2060 u = pa_xnew0(struct userdata, 1);
2063 u->use_mmap = use_mmap;
2064 u->use_tsched = use_tsched;
2065 u->deferred_volume = deferred_volume;
2066 u->fixed_latency_range = fixed_latency_range;
2068 u->rewind_safeguard = rewind_safeguard;
2069 u->rtpoll = pa_rtpoll_new();
2070 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
2072 u->smoother = pa_smoother_new(
2073 SMOOTHER_ADJUST_USEC,
2074 SMOOTHER_WINDOW_USEC,
2080 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2082 dev_id = pa_modargs_get_value(
2084 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2086 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
2088 if (reserve_init(u, dev_id) < 0)
2091 if (reserve_monitor_init(u, dev_id) < 0)
/* --- Open the PCM device (three strategies) --- */
2099 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2100 pa_log("device_id= not set");
2104 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2108 SND_PCM_STREAM_PLAYBACK,
2109 &period_frames, &buffer_frames, tsched_frames,
2113 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2115 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2118 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2122 SND_PCM_STREAM_PLAYBACK,
2123 &period_frames, &buffer_frames, tsched_frames,
2124 &b, &d, profile_set, &mapping)))
2129 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2130 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2133 SND_PCM_STREAM_PLAYBACK,
2134 &period_frames, &buffer_frames, tsched_frames,
2139 pa_assert(u->device_name);
2140 pa_log_info("Successfully opened device %s.", u->device_name);
2142 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2143 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
2148 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
/* b/d report whether mmap and tsched actually got enabled by the open call. */
2150 if (use_mmap && !b) {
2151 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2152 u->use_mmap = use_mmap = FALSE;
2155 if (use_tsched && (!b || !d)) {
2156 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2157 u->use_tsched = use_tsched = FALSE;
2161 pa_log_info("Successfully enabled mmap() mode.");
2163 if (u->use_tsched) {
2164 pa_log_info("Successfully enabled timer-based scheduling mode.");
2166 if (u->fixed_latency_range)
2167 pa_log_info("Disabling latency range changes on underrun");
2170 if (is_iec958(u) || is_hdmi(u))
2173 u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
2175 pa_log_error("Failed to find any supported sample rates.");
2179 /* ALSA might tweak the sample spec, so recalculate the frame size */
2180 frame_size = pa_frame_size(&ss);
2182 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
/* --- Build the pa_sink --- */
2184 pa_sink_new_data_init(&data);
2185 data.driver = driver;
2188 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2190 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2191 * variable instead of using &data.namereg_fail directly, because
2192 * data.namereg_fail is a bitfield and taking the address of a bitfield
2193 * variable is impossible. */
2194 namereg_fail = data.namereg_fail;
2195 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2196 pa_log("Failed to parse namereg_fail argument.");
2197 pa_sink_new_data_done(&data);
2200 data.namereg_fail = namereg_fail;
2202 pa_sink_new_data_set_sample_spec(&data, &ss);
2203 pa_sink_new_data_set_channel_map(&data, &map);
2204 pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
2206 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2207 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2208 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2209 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2210 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2213 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2214 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2217 pa_alsa_init_description(data.proplist);
2219 if (u->control_device)
2220 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2222 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2223 pa_log("Invalid properties");
2224 pa_sink_new_data_done(&data);
2228 if (u->mixer_path_set)
2229 pa_alsa_add_ports(&data.ports, u->mixer_path_set, card);
2231 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2232 (set_formats ? PA_SINK_SET_FORMATS : 0));
2233 pa_sink_new_data_done(&data);
2236 pa_log("Failed to create sink object");
2240 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2241 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2242 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2246 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2247 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2248 pa_log("Failed to parse deferred_volume_extra_delay parameter");
/* --- Wire callbacks and derived parameters --- */
2252 u->sink->parent.process_msg = sink_process_msg;
2254 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2255 u->sink->set_state = sink_set_state_cb;
2256 u->sink->set_port = sink_set_port_cb;
2257 if (u->sink->alternate_sample_rate)
2258 u->sink->update_rate = sink_update_rate_cb;
2259 u->sink->userdata = u;
2261 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2262 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2264 u->frame_size = frame_size;
2265 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2266 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2267 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2269 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2270 (double) u->hwbuf_size / (double) u->fragment_size,
2271 (long unsigned) u->fragment_size,
2272 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2273 (long unsigned) u->hwbuf_size,
2274 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2276 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2277 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2278 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2280 pa_log_info("Disabling rewind for device %s", u->device_name);
2281 pa_sink_set_max_rewind(u->sink, 0);
2284 if (u->use_tsched) {
2285 u->tsched_watermark_ref = tsched_watermark;
2286 reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2288 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2292 if (update_sw_params(u) < 0)
2295 if (setup_mixer(u, ignore_dB) < 0)
2298 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2300 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
2301 pa_log("Failed to create thread.");
2305 /* Get initial mixer settings */
2306 if (data.volume_is_set) {
2307 if (u->sink->set_volume)
2308 u->sink->set_volume(u->sink);
2310 if (u->sink->get_volume)
2311 u->sink->get_volume(u->sink);
2314 if (data.muted_is_set) {
2315 if (u->sink->set_mute)
2316 u->sink->set_mute(u->sink);
2318 if (u->sink->get_mute)
2319 u->sink->get_mute(u->sink);
2322 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2323 u->sink->write_volume(u->sink);
2326 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2327 pa_format_info *format;
2329 /* To start with, we only support PCM formats. Other formats may be added
2330 * with pa_sink_set_formats().*/
2331 format = pa_format_info_new();
2332 format->encoding = PA_ENCODING_PCM;
2333 u->formats = pa_idxset_new(NULL, NULL);
2334 pa_idxset_put(u->formats, format, NULL);
2336 u->sink->get_formats = sink_get_formats;
2337 u->sink->set_formats = sink_set_formats;
2340 pa_sink_put(u->sink);
2343 pa_alsa_profile_set_free(profile_set);
/* Failure path also releases the profile set (success/fail returns are on
 * elided lines). */
2353 pa_alsa_profile_set_free(profile_set);
/* Tear down everything pa_alsa_sink_new() built, in reverse order: unlink
 * the sink, shut down and join the IO thread, then free the message queue,
 * sink, memchunk, mixer pdata, rtpoll item/rtpoll, PCM handle, mixer fdl,
 * mixer path/handle, smoother, format set, and name strings.
 * NOTE(review): elided extract — pa_assert(u), the guarding `if (u->…)`
 * heads for several frees, reserve/monitor teardown and the final
 * pa_xfree(u) are on lines missing from this view. */
2358 static void userdata_free(struct userdata *u) {
2362 pa_sink_unlink(u->sink);
/* Stop the IO thread before freeing anything it may touch. */
2365 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2366 pa_thread_free(u->thread);
2369 pa_thread_mq_done(&u->thread_mq);
2372 pa_sink_unref(u->sink);
2374 if (u->memchunk.memblock)
2375 pa_memblock_unref(u->memchunk.memblock);
2378 pa_alsa_mixer_pdata_free(u->mixer_pd);
2380 if (u->alsa_rtpoll_item)
2381 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2384 pa_rtpoll_free(u->rtpoll);
2386 if (u->pcm_handle) {
2387 snd_pcm_drop(u->pcm_handle);
2388 snd_pcm_close(u->pcm_handle);
2392 pa_alsa_fdlist_free(u->mixer_fdl);
/* Paths owned by a path set are freed with the set, not individually. */
2394 if (u->mixer_path && !u->mixer_path_set)
2395 pa_alsa_path_free(u->mixer_path);
2397 if (u->mixer_handle)
2398 snd_mixer_close(u->mixer_handle);
2401 pa_smoother_free(u->smoother);
2404 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2412 pa_xfree(u->device_name);
2413 pa_xfree(u->control_device);
2414 pa_xfree(u->paths_dir);
2418 void pa_alsa_sink_free(pa_sink *s) {
2421 pa_sink_assert_ref(s);
2422 pa_assert_se(u = s->userdata);