2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
29 #include <pulse/introspect.h>
30 #include <pulse/format.h>
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/sink-input.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-util.h>
42 #include <pulsecore/sample-util.h>
43 #include <pulsecore/mix.h>
44 #include <pulsecore/core-subscribe.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/play-memblockq.h>
48 #include <pulsecore/flist.h>
52 #define MAX_MIX_CHANNELS 32
53 #define MIX_BUFFER_LENGTH (pa_page_size())
54 #define ABSOLUTE_MIN_LATENCY (500)
55 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
56 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
58 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
60 struct pa_sink_volume_change {
64 PA_LLIST_FIELDS(pa_sink_volume_change);
67 struct sink_message_set_port {
72 static void sink_free(pa_object *s);
74 static void pa_sink_volume_change_push(pa_sink *s);
75 static void pa_sink_volume_change_flush(pa_sink *s);
76 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/* Initialize a pa_sink_new_data descriptor: allocates a fresh, empty proplist
 * and a port hashmap keyed by port name whose values are unreffed on removal.
 * NOTE(review): this view is gapped (original line numbers skip 79-81, 84+);
 * the struct zeroing and the `return data;` are presumably among the missing
 * lines -- confirm against the full file. */
78 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
82 data->proplist = pa_proplist_new();
83 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
/* Store a private copy of `name` in the new-data descriptor.
 * NOTE(review): freeing of any previously set data->name is not visible in
 * this gapped view -- verify against the full file. */
88 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
92 data->name = pa_xstrdup(name);
/* Record the desired sample spec. Passing NULL clears the is-set flag and
 * leaves the stored spec untouched (the copy only happens when spec != NULL). */
95 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
98 if ((data->sample_spec_is_set = !!spec))
99 data->sample_spec = *spec;
/* Record the desired channel map. Passing NULL clears the is-set flag and
 * skips the copy, mirroring pa_sink_new_data_set_sample_spec(). */
102 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
105 if ((data->channel_map_is_set = !!map))
106 data->channel_map = *map;
/* Record the alternate sample rate. Unlike the spec/map setters this always
 * marks the field as set; there is no "unset" path through this function. */
109 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
112 data->alternate_sample_rate_is_set = true;
113 data->alternate_sample_rate = alternate_sample_rate;
/* Record the initial volume. Passing NULL clears the is-set flag and skips
 * the copy, mirroring the other conditional setters above. */
116 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
119 if ((data->volume_is_set = !!volume))
120 data->volume = *volume;
/* Record the initial mute state and mark it as explicitly set.
 * NOTE(review): the assignment of `mute` into data->muted (original line 127)
 * is not visible in this gapped view -- confirm against the full file. */
123 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
126 data->muted_is_set = true;
/* Replace the requested active port: free the old string and store a copy of
 * the new one (a NULL `port` leaves active_port as whatever pa_xstrdup(NULL)
 * yields -- presumably NULL, i.e. "no explicit port"; verify). */
130 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
133 pa_xfree(data->active_port);
134 data->active_port = pa_xstrdup(port);
/* Release everything owned by a pa_sink_new_data descriptor: the proplist,
 * the port hashmap (which unrefs its ports via its free callback), the name
 * string and the active-port string. The struct itself is not freed.
 * NOTE(review): the gapped view hides the NULL-guards presumably surrounding
 * the proplist/ports frees (original lines 138-144). */
137 void pa_sink_new_data_done(pa_sink_new_data *data) {
140 pa_proplist_free(data->proplist);
143 pa_hashmap_free(data->ports);
145 pa_xfree(data->name);
146 pa_xfree(data->active_port);
149 /* Called from main context */
/* Clear all implementor-supplied callbacks on the sink, returning it to a
 * "no backend behaviour" state. Called from the main (control) context.
 * NOTE(review): additional callback fields (e.g. set_mute/get_mute, set_state,
 * set_port) are hidden by the gaps in this view (lines 151-153, 157-158, 161). */
150 static void reset_callbacks(pa_sink *s) {
154 s->get_volume = NULL;
155 s->set_volume = NULL;
156 s->write_volume = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
162 s->get_formats = NULL;
163 s->set_formats = NULL;
164 s->update_rate = NULL;
167 /* Called from main context */
/* Create a new sink object from the (to-be-fixated) `data` description.
 * Registers the sink's name, fires the SINK_NEW and SINK_FIXATE hooks,
 * validates sample spec / channel map / volume, copies the description into
 * the new pa_sink, initialises the IO-thread mirror state (thread_info), and
 * finally creates the companion "<name>.monitor" source. Returns NULL on any
 * validation or registration failure.
 * NOTE(review): this view is gapped (the embedded original line numbers skip),
 * so several returns, else-branches and closing braces are not visible;
 * comments below only describe what the visible lines demonstrate. */
168 pa_sink* pa_sink_new(
170 pa_sink_new_data *data,
171 pa_sink_flags_t flags) {
175 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
176 pa_source_new_data source_data;
182 pa_assert(data->name);
183 pa_assert_ctl_context();
185 s = pa_msgobject_new(pa_sink);
/* Claim the sink's name in the core name registry; failure aborts creation. */
187 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
188 pa_log_debug("Failed to register name %s.", data->name);
193 pa_sink_new_data_set_name(data, name);
/* Give modules a chance to veto or amend the new sink before fixation. */
195 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
197 pa_namereg_unregister(core, name);
201 /* FIXME, need to free s here on failure */
/* Validate the fixated description; each pa_return_null_if_fail() bails out
 * with NULL on violation. */
203 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
204 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
206 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
208 if (!data->channel_map_is_set)
209 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
211 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
212 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
214 /* FIXME: There should probably be a general function for checking whether
215 * the sink volume is allowed to be set, like there is for sink inputs. */
216 pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* No explicit volume requested: default to NORM and don't persist it. */
218 if (!data->volume_is_set) {
219 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
220 data->save_volume = false;
223 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
224 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
226 if (!data->muted_is_set)
/* Inherit proplist entries and description/icon/role defaults from the card. */
230 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
232 pa_device_init_description(data->proplist, data->card);
233 pa_device_init_icon(data->proplist, true);
234 pa_device_init_intended_roles(data->proplist);
/* No port requested: pick the best-priority port as the initial one. */
236 if (!data->active_port) {
237 pa_device_port *p = pa_device_port_find_best(data->ports);
239 pa_sink_new_data_set_port(data, p->name);
242 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
244 pa_namereg_unregister(core, name);
/* Fill in the object's vtable and main-thread state from the description. */
248 s->parent.parent.free = sink_free;
249 s->parent.process_msg = pa_sink_process_msg;
252 s->state = PA_SINK_INIT;
255 s->suspend_cause = data->suspend_cause;
256 pa_sink_set_mixer_dirty(s, false);
257 s->name = pa_xstrdup(name);
258 s->proplist = pa_proplist_copy(data->proplist);
259 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
260 s->module = data->module;
261 s->card = data->card;
263 s->priority = pa_device_init_priority(s->proplist);
265 s->sample_spec = data->sample_spec;
266 s->channel_map = data->channel_map;
267 s->default_sample_rate = s->sample_spec.rate;
269 if (data->alternate_sample_rate_is_set)
270 s->alternate_sample_rate = data->alternate_sample_rate;
272 s->alternate_sample_rate = s->core->alternate_sample_rate;
/* An alternate rate equal to the default is useless; disable it (0). */
274 if (s->sample_spec.rate == s->alternate_sample_rate) {
275 pa_log_warn("Default and alternate sample rates are the same.");
276 s->alternate_sample_rate = 0;
279 s->inputs = pa_idxset_new(NULL, NULL);
281 s->input_to_master = NULL;
283 s->reference_volume = s->real_volume = data->volume;
284 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
285 s->base_volume = PA_VOLUME_NORM;
286 s->n_volume_steps = PA_VOLUME_NORM+1;
287 s->muted = data->muted;
288 s->refresh_volume = s->refresh_muted = false;
295 /* As a minor optimization we just steal the list instead of
297 s->ports = data->ports;
300 s->active_port = NULL;
301 s->save_port = false;
/* Resolve the requested port name against the (stolen) port hashmap. */
303 if (data->active_port)
304 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
305 s->save_port = data->save_port;
307 /* Hopefully the active port has already been assigned in the previous call
308 to pa_device_port_find_best, but better safe than sorry */
310 s->active_port = pa_device_port_find_best(s->ports);
313 s->port_latency_offset = s->active_port->latency_offset;
315 s->port_latency_offset = 0;
317 s->save_volume = data->save_volume;
318 s->save_muted = data->save_muted;
320 pa_silence_memchunk_get(
321 &core->silence_cache,
/* Initialise the IO-thread-side mirror of the main-thread state. */
327 s->thread_info.rtpoll = NULL;
328 s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
329 (pa_free_cb_t) pa_sink_input_unref);
330 s->thread_info.soft_volume = s->soft_volume;
331 s->thread_info.soft_muted = s->muted;
332 s->thread_info.state = s->state;
333 s->thread_info.rewind_nbytes = 0;
334 s->thread_info.rewind_requested = false;
335 s->thread_info.max_rewind = 0;
336 s->thread_info.max_request = 0;
337 s->thread_info.requested_latency_valid = false;
338 s->thread_info.requested_latency = 0;
339 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
340 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
341 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
343 PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
344 s->thread_info.volume_changes_tail = NULL;
/* current_hw_volume = real_volume / soft_volume (software part divided out). */
345 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
346 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
347 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
348 s->thread_info.port_latency_offset = s->port_latency_offset;
350 /* FIXME: This should probably be moved to pa_sink_put() */
351 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
354 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
355 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
357 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
360 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
361 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Create the companion monitor source that taps this sink's rendered audio. */
365 pa_source_new_data_init(&source_data);
366 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
367 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
368 pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
369 source_data.name = pa_sprintf_malloc("%s.monitor", name);
370 source_data.driver = data->driver;
371 source_data.module = data->module;
372 source_data.card = data->card;
374 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
375 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
376 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
/* The monitor inherits only the latency-related capability flags. */
378 s->monitor_source = pa_source_new(core, &source_data,
379 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
380 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
382 pa_source_new_data_done(&source_data);
384 if (!s->monitor_source) {
390 s->monitor_source->monitor_of = s;
/* Keep the monitor's latency/rewind bounds in sync with ours. */
392 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
393 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
394 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
399 /* Called from main context */
/* Transition the sink to `state` (main context). Calls the implementor's
 * set_state() callback, then synchronously forwards the new state to the IO
 * thread via the SET_STATE message. On success it fires the state-changed
 * hook / subscription event (unless we're entering UNLINKED -- pa_sink_unlink()
 * handles that) and, on a suspend/resume transition, notifies all sink inputs
 * and the monitor source. Returns 0 on success, negative on failure.
 * NOTE(review): gapped view -- early return for no-op transitions, the
 * `suspend_change =` assignment head, and several braces are not visible. */
400 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
403 pa_sink_state_t original_state;
406 pa_assert_ctl_context();
408 if (s->state == state)
411 original_state = s->state;
/* A transition counts as suspend/resume iff exactly one side is SUSPENDED
 * and the other is an opened (idle/running) state. */
414 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
415 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
418 if ((ret = s->set_state(s, state)) < 0)
422 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
/* IO thread rejected the state change: roll the implementor back. */
425 s->set_state(s, original_state);
432 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
433 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
434 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
437 if (suspend_change) {
441 /* We're suspending or resuming, tell everyone about it */
443 PA_IDXSET_FOREACH(i, s->inputs, idx)
/* Inputs flagged KILL_ON_SUSPEND are killed rather than suspended. */
444 if (s->state == PA_SINK_SUSPENDED &&
445 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
446 pa_sink_input_kill(i);
448 i->suspend(i, state == PA_SINK_SUSPENDED);
450 if (s->monitor_source)
451 pa_source_sync_suspend(s->monitor_source);
/* Install the implementor's get_volume callback. Body not visible in this
 * gapped view; presumably just `s->get_volume = cb;` -- confirm. */
457 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Install (or clear, with cb == NULL) the implementor's set_volume callback
 * and toggle PA_SINK_HW_VOLUME_CTRL accordingly. Clearing the callback also
 * re-evaluates decibel-volume support in software. If the flags changed after
 * init, clients are notified via a CHANGE subscription event.
 * NOTE(review): gapped view -- the `s->set_volume = cb;` assignment, the
 * `flags = s->flags;` save and the if/else structure are partly hidden. */
463 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
464 pa_sink_flags_t flags;
/* write_volume (deferred volume) requires a set_volume callback. */
467 pa_assert(!s->write_volume || cb);
471 /* Save the current flags so we can tell if they've changed */
475 /* The sink implementor is responsible for setting decibel volume support */
476 s->flags |= PA_SINK_HW_VOLUME_CTRL;
478 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
479 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
480 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
483 /* If the flags have changed after init, let any clients know via a change event */
484 if (s->state != PA_SINK_INIT && flags != s->flags)
485 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install (or clear) the implementor's write_volume callback and toggle
 * PA_SINK_DEFERRED_VOLUME to match. A non-NULL cb requires set_volume to be
 * present (asserted). Clients are notified if the flags change after init. */
488 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
489 pa_sink_flags_t flags;
492 pa_assert(!cb || s->set_volume);
494 s->write_volume = cb;
496 /* Save the current flags so we can tell if they've changed */
500 s->flags |= PA_SINK_DEFERRED_VOLUME;
502 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
504 /* If the flags have changed after init, let any clients know via a change event */
505 if (s->state != PA_SINK_INIT && flags != s->flags)
506 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install the implementor's get_mute callback. Body not visible in this
 * gapped view; presumably just `s->get_mute = cb;` -- confirm. */
509 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
/* Install (or clear) the implementor's set_mute callback and toggle
 * PA_SINK_HW_MUTE_CTRL to match. Clients are notified via a CHANGE event if
 * the flags change after init. NOTE(review): the `s->set_mute = cb;`
 * assignment and the if/else structure are hidden by gaps in this view. */
515 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
516 pa_sink_flags_t flags;
522 /* Save the current flags so we can tell if they've changed */
526 s->flags |= PA_SINK_HW_MUTE_CTRL;
528 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
530 /* If the flags have changed after init, let any clients know via a change event */
531 if (s->state != PA_SINK_INIT && flags != s->flags)
532 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable or disable PA_SINK_FLAT_VOLUME on the sink. The request is gated by
 * the global user preference (core->flat_volumes); flat volume is never turned
 * on when the user disabled it. Clients are notified if the flag changes after
 * init. */
535 static void enable_flat_volume(pa_sink *s, bool enable) {
536 pa_sink_flags_t flags;
540 /* Always follow the overall user preference here */
541 enable = enable && s->core->flat_volumes;
543 /* Save the current flags so we can tell if they've changed */
547 s->flags |= PA_SINK_FLAT_VOLUME;
549 s->flags &= ~PA_SINK_FLAT_VOLUME;
551 /* If the flags have changed after init, let any clients know via a change event */
552 if (s->state != PA_SINK_INIT && flags != s->flags)
553 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable or disable PA_SINK_DECIBEL_VOLUME. Flat volume follows dB volume:
 * enabling dB support requests flat volume too, disabling it turns flat
 * volume off. Clients are notified if the flags change after init. */
556 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
557 pa_sink_flags_t flags;
561 /* Save the current flags so we can tell if they've changed */
565 s->flags |= PA_SINK_DECIBEL_VOLUME;
566 enable_flat_volume(s, true);
568 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
569 enable_flat_volume(s, false);
572 /* If the flags have changed after init, let any clients know via a change event */
573 if (s->state != PA_SINK_INIT && flags != s->flags)
574 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
577 /* Called from main context */
/* Finish sink initialisation after the implementor configured it (main
 * context). Verifies the implementor used the callback setters consistently,
 * fixes up decibel/flat/shared volume state, copies the resulting volumes
 * into the thread_info mirror, asserts the flag/latency invariants against
 * the monitor source, moves the sink to SUSPENDED or IDLE, publishes the
 * monitor source, and announces the new sink (NEW event + SINK_PUT hook,
 * then default-sink re-evaluation). */
578 void pa_sink_put(pa_sink* s) {
579 pa_sink_assert_ref(s);
580 pa_assert_ctl_context();
582 pa_assert(s->state == PA_SINK_INIT);
/* Volume sharing is only meaningful for filter sinks. */
583 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
585 /* The following fields must be initialized properly when calling _put() */
586 pa_assert(s->asyncmsgq);
587 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
589 /* Generally, flags should be initialized via pa_sink_new(). As a
590 * special exception we allow some volume related flags to be set
591 * between _new() and _put() by the callback setter functions above.
593 * Thus we implement a couple safeguards here which ensure the above
594 * setters were used (or at least the implementor made manual changes
595 * in a compatible way).
597 * Note: All of these flags set here can change over the life time
599 pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
600 pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
601 pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
603 /* XXX: Currently decibel volume is disabled for all sinks that use volume
604 * sharing. When the master sink supports decibel volume, it would be good
605 * to have the flag also in the filter sink, but currently we don't do that
606 * so that the flags of the filter sink never change when it's moved from
607 * a master sink to another. One solution for this problem would be to
608 * remove user-visible volume altogether from filter sinks when volume
609 * sharing is used, but the current approach was easier to implement... */
610 /* We always support decibel volumes in software, otherwise we leave it to
611 * the sink implementor to set this flag as needed.
613 * Note: This flag can also change over the life time of the sink. */
614 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
615 pa_sink_enable_decibel_volume(s, true);
616 s->soft_volume = s->reference_volume;
619 /* If the sink implementor support DB volumes by itself, we should always
620 * try and enable flat volumes too */
621 if ((s->flags & PA_SINK_DECIBEL_VOLUME))
622 enable_flat_volume(s, true);
/* Shared-volume filter sinks take their volumes from the master sink,
 * remapped from the master's channel map to ours. */
624 if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
625 pa_sink *root_sink = pa_sink_get_master(s);
627 pa_assert(root_sink);
629 s->reference_volume = root_sink->reference_volume;
630 pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
632 s->real_volume = root_sink->real_volume;
633 pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
635 /* We assume that if the sink implementor changed the default
636 * volume he did so in real_volume, because that is the usual
637 * place where he is supposed to place his changes. */
638 s->reference_volume = s->real_volume;
/* Re-sync the IO-thread mirror with whatever the implementor changed. */
640 s->thread_info.soft_volume = s->soft_volume;
641 s->thread_info.soft_muted = s->muted;
642 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
/* Invariants between the volume flags, latency flags and the monitor. */
644 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
645 || (s->base_volume == PA_VOLUME_NORM
646 && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
647 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
648 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
649 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
650 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
652 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
653 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
654 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
/* Enter SUSPENDED if a suspend cause is pending, IDLE otherwise. */
656 if (s->suspend_cause)
657 pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED) == 0);
659 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
661 pa_source_put(s->monitor_source);
663 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
664 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
666 /* This function must be called after the PA_CORE_HOOK_SINK_PUT hook,
667 * because module-switch-on-connect needs to know the old default sink */
668 pa_core_update_default_sink(s->core);
671 /* Called from main context */
/* Disconnect the sink from the core (main context). Fires the UNLINK hook,
 * unregisters the name, removes the sink from the core (and card) sets,
 * re-evaluates the default sink, kills all remaining inputs, enters the
 * UNLINKED state, unlinks the monitor source, and finally posts the REMOVE
 * subscription event plus the UNLINK_POST hook. Idempotent via
 * unlink_requested. NOTE(review): several guards/braces (e.g. the `linked`
 * checks) are hidden by gaps in this view. */
672 void pa_sink_unlink(pa_sink* s) {
674 pa_sink_input *i, PA_UNUSED *j = NULL;
676 pa_sink_assert_ref(s);
677 pa_assert_ctl_context();
679 /* Please note that pa_sink_unlink() does more than simply
680 * reversing pa_sink_put(). It also undoes the registrations
681 * already done in pa_sink_new()! */
683 if (s->unlink_requested)
686 s->unlink_requested = true;
688 linked = PA_SINK_IS_LINKED(s->state);
691 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
693 if (s->state != PA_SINK_UNLINKED)
694 pa_namereg_unregister(s->core, s->name);
695 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
697 pa_core_update_default_sink(s->core);
700 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill every remaining input; each kill removes it from s->inputs. */
702 while ((i = pa_idxset_first(s->inputs, NULL))) {
704 pa_sink_input_kill(i);
709 sink_set_state(s, PA_SINK_UNLINKED);
711 s->state = PA_SINK_UNLINKED;
715 if (s->monitor_source)
716 pa_source_unlink(s->monitor_source);
719 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
720 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
724 /* Called from main context */
/* Destructor invoked when the sink's refcount drops to zero (main context).
 * The sink must already be unlinked. Flushes pending deferred volume changes
 * and releases the monitor source, input containers, silence memblock,
 * proplist and port hashmap. NOTE(review): frees of name/driver strings etc.
 * (original lines 747-758) are hidden by gaps in this view. */
725 static void sink_free(pa_object *o) {
726 pa_sink *s = PA_SINK(o);
729 pa_assert_ctl_context();
730 pa_assert(pa_sink_refcnt(s) == 0);
731 pa_assert(!PA_SINK_IS_LINKED(s->state));
733 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
735 pa_sink_volume_change_flush(s);
737 if (s->monitor_source) {
738 pa_source_unref(s->monitor_source);
739 s->monitor_source = NULL;
742 pa_idxset_free(s->inputs, NULL);
743 pa_hashmap_free(s->thread_info.inputs);
745 if (s->silence.memblock)
746 pa_memblock_unref(s->silence.memblock);
752 pa_proplist_free(s->proplist);
755 pa_hashmap_free(s->ports);
760 /* Called from main context, and not while the IO thread is active, please */
/* Attach the async message queue used to talk to the IO thread, and forward
 * it to the monitor source. Must not be called while the IO thread runs.
 * NOTE(review): the `s->asyncmsgq = q;` assignment (original lines 764-766)
 * is hidden by a gap in this view. */
761 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
762 pa_sink_assert_ref(s);
763 pa_assert_ctl_context();
767 if (s->monitor_source)
768 pa_source_set_asyncmsgq(s->monitor_source, q);
771 /* Called from main context, and not while the IO thread is active, please */
/* Change a restricted subset of the sink's flags (main context, IO thread not
 * running). Only PA_SINK_LATENCY and PA_SINK_DYNAMIC_LATENCY may be touched
 * (asserted). If anything changed: log, post a CHANGE event, fire the
 * FLAGS_CHANGED hook, mirror the change onto the monitor source's equivalent
 * flags, and recurse into any filter sinks stacked on our inputs
 * (input->origin_sink) so the whole filter chain stays consistent. */
772 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
773 pa_sink_flags_t old_flags;
774 pa_sink_input *input;
777 pa_sink_assert_ref(s);
778 pa_assert_ctl_context();
780 /* For now, allow only a minimal set of flags to be changed. */
781 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
783 old_flags = s->flags;
784 s->flags = (s->flags & ~mask) | (value & mask);
/* No-op update: nothing to announce. */
786 if (s->flags == old_flags)
789 if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
790 pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
792 if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
793 pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
794 s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled")2;
796 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
797 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
/* Translate the sink-flag mask/value into the monitor source's flag space. */
799 if (s->monitor_source)
800 pa_source_update_flags(s->monitor_source,
801 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
802 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
803 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
804 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
806 PA_IDXSET_FOREACH(input, s->inputs, idx) {
807 if (input->origin_sink)
808 pa_sink_update_flags(input->origin_sink, mask, value);
812 /* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object the IO thread will run on, and propagate it to
 * the monitor source. Called from IO context, or from the main context
 * before _put(). */
813 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
814 pa_sink_assert_ref(s);
815 pa_sink_assert_io_context(s);
817 s->thread_info.rtpoll = p;
819 if (s->monitor_source)
820 pa_source_set_rtpoll(s->monitor_source, p);
823 /* Called from main context */
/* Re-evaluate the sink's RUNNING/IDLE state from its current usage count
 * (main context). A suspended sink is left alone. Returns the result of
 * sink_set_state(), i.e. 0 on success. */
824 int pa_sink_update_status(pa_sink*s) {
825 pa_sink_assert_ref(s);
826 pa_assert_ctl_context();
827 pa_assert(PA_SINK_IS_LINKED(s->state));
829 if (s->state == PA_SINK_SUSPENDED)
832 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
835 /* Called from any context - must be threadsafe */
/* Atomically mark the hardware mixer as (not) needing a settings re-apply.
 * Safe to call from any thread. */
836 void pa_sink_set_mixer_dirty(pa_sink *s, bool is_dirty) {
837 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
840 /* Called from main context */
/* Add or remove a suspend cause bit on the sink and its monitor source, then
 * transition to SUSPENDED (if any cause remains) or back to RUNNING/IDLE
 * (main context). While resuming, if the mixer became accessible again
 * (mixer_dirty set and no session suspend pending), the active port is
 * re-applied -- via a SET_PORT message for deferred-volume sinks, or the
 * set_port() callback directly otherwise. Returns the sink_set_state()
 * result, or 0 when the effective state would not change. */
841 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
842 pa_sink_assert_ref(s);
843 pa_assert_ctl_context();
844 pa_assert(PA_SINK_IS_LINKED(s->state));
845 pa_assert(cause != 0);
848 s->suspend_cause |= cause;
849 s->monitor_source->suspend_cause |= cause;
851 s->suspend_cause &= ~cause;
852 s->monitor_source->suspend_cause &= ~cause;
855 if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
856 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
857 it'll be handled just fine. */
858 pa_sink_set_mixer_dirty(s, false);
859 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
860 if (s->active_port && s->set_port) {
861 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
862 struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
863 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
866 s->set_port(s, s->active_port);
/* Already in the state implied by suspend_cause: nothing to do. */
876 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
879 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
881 if (s->suspend_cause)
882 return sink_set_state(s, PA_SINK_SUSPENDED);
884 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
887 /* Called from main context */
/* Begin moving all inputs away from this sink (main context). Each input that
 * successfully starts its move is kept referenced (and, per the visible flow,
 * queued for pa_sink_move_all_finish()); inputs that fail to start are
 * unreffed immediately. Returns the queue `q` of moving inputs.
 * NOTE(review): the queue allocation/push (original lines 895-898, 905-906)
 * are hidden by gaps -- the ref/unref pairing suggests pushed inputs keep
 * their reference; confirm. */
888 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
889 pa_sink_input *i, *n;
892 pa_sink_assert_ref(s);
893 pa_assert_ctl_context();
894 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the successor before acting on `i`, since a move mutates s->inputs. */
899 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
900 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
902 pa_sink_input_ref(i);
904 if (pa_sink_input_start_move(i) >= 0)
907 pa_sink_input_unref(i);
913 /* Called from main context */
/* Complete a mass move: pop every input from `q`, finish its move onto sink
 * `s` (falling back to fail_move on error), drop the reference taken by
 * pa_sink_move_all_start(), and free the queue (main context). */
914 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
917 pa_sink_assert_ref(s);
918 pa_assert_ctl_context();
919 pa_assert(PA_SINK_IS_LINKED(s->state));
922 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
923 if (pa_sink_input_finish_move(i, s, save) < 0)
924 pa_sink_input_fail_move(i);
926 pa_sink_input_unref(i);
929 pa_queue_free(q, NULL);
932 /* Called from main context */
/* Abort a mass move: fail every queued input's move, drop its reference, and
 * free the queue (main context). */
933 void pa_sink_move_all_fail(pa_queue *q) {
936 pa_assert_ctl_context();
939 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
940 pa_sink_input_fail_move(i);
941 pa_sink_input_unref(i);
944 pa_queue_free(q, NULL);
947 /* Called from IO thread context */
/* IO thread: inspect all inputs' underrun state and return how much of
 * `left_to_play` is still safely ahead of the longest detected underrun,
 * i.e. `left_to_play - result`. Inputs whose underrun covers the whole
 * playback buffer are told to process the underrun; filter sinks are handled
 * by recursing into the origin sink with sizes converted between the two
 * sample-spec domains. NOTE(review): the initialisation of `result` (hidden
 * by a gap) is presumably 0 -- confirm against the full file. */
948 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
953 pa_sink_assert_ref(s);
954 pa_sink_assert_io_context(s);
956 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
957 size_t uf = i->thread_info.underrun_for_sink;
959 /* Propagate down the filter tree */
960 if (i->origin_sink) {
961 size_t filter_result, left_to_play_origin;
963 /* The recursive call works in the origin sink domain ... */
964 left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
966 /* .. and returns the time to sleep before waking up. We need the
967 * underrun duration for comparisons, so we undo the subtraction on
968 * the return value... */
969 filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
971 /* ... and convert it back to the master sink domain */
972 filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
974 /* Remember the longest underrun so far */
975 if (filter_result > result)
976 result = filter_result;
980 /* No underrun here, move on */
982 } else if (uf >= left_to_play) {
983 /* The sink has possibly consumed all the data the sink input provided */
984 pa_sink_input_process_underrun(i);
985 } else if (uf > result) {
986 /* Remember the longest underrun so far */
992 pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
993 (long) result, (long) left_to_play - result);
994 return left_to_play - result;
997 /* Called from IO thread context */
/* IO thread: execute a rewind of `nbytes`. Short-cuts when nothing was
 * requested and nbytes is zero; otherwise clears the pending-rewind state,
 * rewinds the deferred-volume change queue, propagates the rewind to every
 * input, and finally to the monitor source if it is linked. */
998 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1002 pa_sink_assert_ref(s);
1003 pa_sink_assert_io_context(s);
1004 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1006 /* If nobody requested this and this is actually no real rewind
1007 * then we can short cut this. Please note that this means that
1008 * not all rewind requests triggered upstream will always be
1009 * translated in actual requests! */
1010 if (!s->thread_info.rewind_requested && nbytes <= 0)
1013 s->thread_info.rewind_nbytes = 0;
1014 s->thread_info.rewind_requested = false;
1017 pa_log_debug("Processing rewind...");
1018 if (s->flags & PA_SINK_DEFERRED_VOLUME)
1019 pa_sink_volume_change_rewind(s, nbytes);
1022 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1023 pa_sink_input_assert_ref(i);
1024 pa_sink_input_process_rewind(i, nbytes);
1028 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1029 pa_source_process_rewind(s->monitor_source, nbytes);
1033 /* Called from IO thread context */
/* IO thread: peek up to `maxinfo` inputs' audio into the `info` array for
 * mixing. Tracks the shortest peeked chunk in `mixlength` and writes it back
 * through *length so the caller mixes only what every input can supply.
 * Purely silent memblocks are dropped (unreffed) rather than mixed; kept
 * entries hold a reference to their sink input in info->userdata. Returns the
 * number of entries filled (per the visible structure; the `return n;` and
 * the info++/maxinfo-- advancing are hidden by gaps -- confirm). */
1034 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1038 size_t mixlength = *length;
1040 pa_sink_assert_ref(s);
1041 pa_sink_assert_io_context(s);
1044 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1045 pa_sink_input_assert_ref(i);
1047 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
/* Shrink the common mix length to the shortest chunk seen so far. */
1049 if (mixlength == 0 || info->chunk.length < mixlength)
1050 mixlength = info->chunk.length;
1052 if (pa_memblock_is_silence(info->chunk.memblock)) {
1053 pa_memblock_unref(info->chunk.memblock);
1057 info->userdata = pa_sink_input_ref(i);
1059 pa_assert(info->chunk.memblock);
1060 pa_assert(info->chunk.length > 0);
1068 *length = mixlength;
1073 /* Called from IO thread context */
/* IO thread: after a render, drop `result->length` bytes from every input,
 * feed per-input audio to any direct source outputs, unref the pa_mix_info
 * entries (both matched and leftover ones), and post the mixed result to the
 * monitor source. The hashmap walk is matched against the `info` array on the
 * assumption that input order rarely changes.
 * NOTE(review): heavily gapped view -- the per-entry bookkeeping around
 * n_unreffed, the `c = m->chunk` / `c = s->silence` assignments and several
 * braces are not visible; comments below describe only what is shown. */
1074 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1078 unsigned n_unreffed = 0;
1080 pa_sink_assert_ref(s);
1081 pa_sink_assert_io_context(s);
1083 pa_assert(result->memblock);
1084 pa_assert(result->length > 0);
1086 /* We optimize for the case where the order of the inputs has not changed */
1088 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1090 pa_mix_info* m = NULL;
1092 pa_sink_input_assert_ref(i);
1094 /* Let's try to find the matching entry info the pa_mix_info array */
1095 for (j = 0; j < n; j ++) {
1097 if (info[p].userdata == i) {
1107 /* Drop read data */
1108 pa_sink_input_drop(i, result->length);
1110 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
/* Direct outputs get the input's own (volume-adjusted) audio, not the mix. */
1112 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1113 void *ostate = NULL;
1114 pa_source_output *o;
1117 if (m && m->chunk.memblock) {
1119 pa_memblock_ref(c.memblock);
1120 pa_assert(result->length <= c.length);
1121 c.length = result->length;
1123 pa_memchunk_make_writable(&c, 0);
1124 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
/* No chunk for this input: fall back path (presumably silence). */
1127 pa_memblock_ref(c.memblock);
1128 pa_assert(result->length <= c.length);
1129 c.length = result->length;
1132 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1133 pa_source_output_assert_ref(o);
1134 pa_assert(o->direct_on_input == i);
1135 pa_source_post_direct(s->monitor_source, o, &c);
1138 pa_memblock_unref(c.memblock);
/* Release the matched mix-info entry's chunk and input reference. */
1143 if (m->chunk.memblock) {
1144 pa_memblock_unref(m->chunk.memblock);
1145 pa_memchunk_reset(&m->chunk);
1148 pa_sink_input_unref(m->userdata);
1155 /* Now drop references to entries that are included in the
1156 * pa_mix_info array but don't exist anymore */
1158 if (n_unreffed < n) {
1159 for (; n > 0; info++, n--) {
1161 pa_sink_input_unref(info->userdata);
1162 if (info->chunk.memblock)
1163 pa_memblock_unref(info->chunk.memblock);
1167 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1168 pa_source_post(s->monitor_source, result);
1171 /* Called from IO thread context */
/* Render a chunk of mixed audio from all inputs into *result. The caller
 * receives (and owns) a reference on result->memblock. Must not be called
 * while a rewind is pending (asserted below).
 * NOTE(review): interior lines are elided in this view. */
1172 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1173 pa_mix_info info[MAX_MIX_CHANNELS];
1175 size_t block_size_max;
1177 pa_sink_assert_ref(s);
1178 pa_sink_assert_io_context(s);
1179 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1180 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1183 pa_assert(!s->thread_info.rewind_requested);
1184 pa_assert(s->thread_info.rewind_nbytes == 0);
/* A suspended sink renders silence from the cached silence memblock. */
1186 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1187 result->memblock = pa_memblock_ref(s->silence.memblock);
1188 result->index = s->silence.index;
1189 result->length = PA_MIN(s->silence.length, length);
/* Clamp the render size to the mix buffer and mempool block-size limits,
 * keeping it frame-aligned. */
1196 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1198 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1199 if (length > block_size_max)
1200 length = pa_frame_align(block_size_max, &s->sample_spec);
1202 pa_assert(length > 0);
1204 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: no input contributed data — return silence. */
1208 *result = s->silence;
1209 pa_memblock_ref(result->memblock);
1211 if (result->length > length)
1212 result->length = length;
/* n == 1: single input — skip mixing, reuse its chunk and apply the
 * combined (sink soft volume x input volume) in place. */
1214 } else if (n == 1) {
1217 *result = info[0].chunk;
1218 pa_memblock_ref(result->memblock);
1220 if (result->length > length)
1221 result->length = length;
1223 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1225 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1226 pa_memblock_unref(result->memblock);
1227 pa_silence_memchunk_get(&s->core->silence_cache,
1232 } else if (!pa_cvolume_is_norm(&volume)) {
1233 pa_memchunk_make_writable(result, 0);
1234 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* General case: mix all n inputs into a freshly allocated memblock. */
1238 result->memblock = pa_memblock_new(s->core->mempool, length);
1240 ptr = pa_memblock_acquire(result->memblock);
1241 result->length = pa_mix(info, n,
1244 &s->thread_info.soft_volume,
1245 s->thread_info.soft_muted);
1246 pa_memblock_release(result->memblock);
/* Advance the inputs and release the mix-info references. */
1251 inputs_drop(s, info, n, result);
1256 /* Called from IO thread context */
/* Like pa_sink_render(), but mixes into a caller-supplied chunk in place.
 * On return target->length may be shortened to what was actually
 * rendered; the caller keeps ownership of target->memblock.
 * NOTE(review): interior lines are elided in this view. */
1257 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1258 pa_mix_info info[MAX_MIX_CHANNELS];
1260 size_t length, block_size_max;
1262 pa_sink_assert_ref(s);
1263 pa_sink_assert_io_context(s);
1264 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1266 pa_assert(target->memblock);
1267 pa_assert(target->length > 0);
1268 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1270 pa_assert(!s->thread_info.rewind_requested);
1271 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended sink: just overwrite the target with silence. */
1273 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1274 pa_silence_memchunk(target, &s->sample_spec);
/* Clamp the render size to the mempool block-size limit. */
1280 length = target->length;
1281 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1282 if (length > block_size_max)
1283 length = pa_frame_align(block_size_max, &s->sample_spec);
1285 pa_assert(length > 0);
1287 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: nothing to mix — fill the target with silence. */
1290 if (target->length > length)
1291 target->length = length;
1293 pa_silence_memchunk(target, &s->sample_spec);
/* n == 1: copy the single input's chunk into the target, applying the
 * combined soft volume, or silence if it nets out muted. */
1294 } else if (n == 1) {
1297 if (target->length > length)
1298 target->length = length;
1300 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1302 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1303 pa_silence_memchunk(target, &s->sample_spec);
1307 vchunk = info[0].chunk;
1308 pa_memblock_ref(vchunk.memblock);
1310 if (vchunk.length > length)
1311 vchunk.length = length;
/* Only touch the sample data when the volume is not 0 dB. */
1313 if (!pa_cvolume_is_norm(&volume)) {
1314 pa_memchunk_make_writable(&vchunk, 0);
1315 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1318 pa_memchunk_memcpy(target, &vchunk);
1319 pa_memblock_unref(vchunk.memblock);
/* General case: mix all n inputs directly into the target's memory. */
1325 ptr = pa_memblock_acquire(target->memblock);
1327 target->length = pa_mix(info, n,
1328 (uint8_t*) ptr + target->index, length,
1330 &s->thread_info.soft_volume,
1331 s->thread_info.soft_muted);
1333 pa_memblock_release(target->memblock);
/* Advance the inputs and release the mix-info references. */
1336 inputs_drop(s, info, n, target);
1341 /* Called from IO thread context */
/* Fill the *entire* target chunk, calling pa_sink_render_into()
 * repeatedly (the loop body is largely elided in this view) until
 * target->length bytes have been produced. Unlike pa_sink_render_into(),
 * target->length is never shortened. */
1342 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1346 pa_sink_assert_ref(s);
1347 pa_sink_assert_io_context(s);
1348 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1350 pa_assert(target->memblock);
1351 pa_assert(target->length > 0);
1352 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1354 pa_assert(!s->thread_info.rewind_requested);
1355 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended sink: the whole target becomes silence. */
1357 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1358 pa_silence_memchunk(target, &s->sample_spec);
/* NOTE(review): the loop bookkeeping around this call is elided;
 * `chunk` presumably tracks the not-yet-rendered tail of target. */
1371 pa_sink_render_into(s, &chunk);
1380 /* Called from IO thread context */
/* Render exactly `length` bytes into *result: first a normal
 * pa_sink_render() pass, then — if that produced less than requested —
 * top up the remainder in place with pa_sink_render_into_full(). */
1381 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1382 pa_sink_assert_ref(s);
1383 pa_sink_assert_io_context(s);
1384 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1385 pa_assert(length > 0);
1386 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1389 pa_assert(!s->thread_info.rewind_requested);
1390 pa_assert(s->thread_info.rewind_nbytes == 0);
1394 pa_sink_render(s, length, result);
/* Short render: make the block writable at full size and render the
 * missing tail directly after the data we already have. */
1396 if (result->length < length) {
1399 pa_memchunk_make_writable(result, length);
1401 chunk.memblock = result->memblock;
1402 chunk.index = result->index + result->length;
1403 chunk.length = length - result->length;
1405 pa_sink_render_into_full(s, &chunk);
1407 result->length = length;
1413 /* Called from main thread */
/* Try to switch the sink to a new sample rate (e.g. to match a stream).
 * Picks between the requested, default and alternate rates, preferring
 * whichever minimizes resampling effort, then suspends the sink around
 * the actual rate change. Returns an int status (success/failure paths
 * are partly elided in this view). */
1414 int pa_sink_update_rate(pa_sink *s, uint32_t rate, bool passthrough) {
1416 uint32_t desired_rate;
1417 uint32_t default_rate = s->default_sample_rate;
1418 uint32_t alternate_rate = s->alternate_sample_rate;
1421 bool default_rate_is_usable = false;
1422 bool alternate_rate_is_usable = false;
1423 bool avoid_resampling = s->core->avoid_resampling;
/* Nothing to do if the sink already runs at the requested rate. */
1425 if (rate == s->sample_spec.rate)
/* The sink implementation must support rate switching at all. */
1428 if (!s->update_rate)
1431 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1432 pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
/* Refuse to change the rate while the sink (or its monitor) is
 * actively running. */
1436 if (PA_SINK_IS_RUNNING(s->state)) {
1437 pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
1438 s->sample_spec.rate);
1442 if (s->monitor_source) {
1443 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1444 pa_log_info("Cannot update rate, monitor source is RUNNING");
1449 if (PA_UNLIKELY(!pa_sample_rate_valid(rate)))
/* NOTE(review): the passthrough condition guarding this branch is
 * elided — presumably "if (passthrough)". */
1453 /* We have to try to use the sink input rate */
1454 desired_rate = rate;
1456 } else if (avoid_resampling && (rate >= default_rate || rate >= alternate_rate)) {
1457 /* We just try to set the sink input's sample rate if it's not too low */
1458 desired_rate = rate;
1460 } else if (default_rate == rate || alternate_rate == rate) {
1461 /* We can directly try to use this rate */
1462 desired_rate = rate;
/* Otherwise pick default vs. alternate by rate-family compatibility
 * (44.1 kHz family = multiples of 11025, 48 kHz family = multiples
 * of 4000). */
1465 /* See if we can pick a rate that results in less resampling effort */
1466 if (default_rate % 11025 == 0 && rate % 11025 == 0)
1467 default_rate_is_usable = true;
1468 if (default_rate % 4000 == 0 && rate % 4000 == 0)
1469 default_rate_is_usable = true;
1470 if (alternate_rate && alternate_rate % 11025 == 0 && rate % 11025 == 0)
1471 alternate_rate_is_usable = true;
1472 if (alternate_rate && alternate_rate % 4000 == 0 && rate % 4000 == 0)
1473 alternate_rate_is_usable = true;
1475 if (alternate_rate_is_usable && !default_rate_is_usable)
1476 desired_rate = alternate_rate;
1478 desired_rate = default_rate;
1481 if (desired_rate == s->sample_spec.rate)
/* Non-passthrough sinks with active users cannot be switched. */
1484 if (!passthrough && pa_sink_used_by(s) > 0)
1487 pa_log_debug("Suspending sink %s due to changing the sample rate.", s->name);
1488 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1490 if (s->update_rate(s, desired_rate) >= 0) {
1491 /* update monitor source as well */
1492 if (s->monitor_source && !passthrough)
1493 pa_source_update_rate(s->monitor_source, desired_rate, false);
1494 pa_log_info("Changed sampling rate successfully");
/* Corked inputs are resampled lazily: update them now so they are
 * correct when uncorked. */
1496 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1497 if (i->state == PA_SINK_INPUT_CORKED)
1498 pa_sink_input_update_rate(i);
1504 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1509 /* Called from main thread */
/* Query the sink's current latency (in usec, sound-card time domain) by
 * messaging the IO thread, then apply the configured port latency
 * offset. Early-return paths for suspended sinks and sinks without
 * PA_SINK_LATENCY are partly elided in this view. */
1510 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1513 pa_sink_assert_ref(s);
1514 pa_assert_ctl_context();
1515 pa_assert(PA_SINK_IS_LINKED(s->state));
1517 /* The returned value is supposed to be in the time domain of the sound card! */
1519 if (s->state == PA_SINK_SUSPENDED)
1522 if (!(s->flags & PA_SINK_LATENCY))
/* Synchronous round-trip to the IO thread to read the live latency. */
1525 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1527 /* the return value is unsigned, so check that the offset can be added to usec without
1529 if (-s->port_latency_offset <= usec)
1530 usec += s->port_latency_offset;
1534 return (pa_usec_t)usec;
1537 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): calls process_msg()
 * directly instead of sending a message. May return a negative value
 * only when allow_negative is true; otherwise negatives are clamped. */
1538 int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
1542 pa_sink_assert_ref(s);
1543 pa_sink_assert_io_context(s);
1544 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1546 /* The returned value is supposed to be in the time domain of the sound card! */
1548 if (s->thread_info.state == PA_SINK_SUSPENDED)
1551 if (!(s->flags & PA_SINK_LATENCY))
1554 o = PA_MSGOBJECT(s);
1556 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1558 o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1560 /* If allow_negative is false, the call should only return positive values, */
1561 usec += s->thread_info.port_latency_offset;
1562 if (!allow_negative && usec < 0)
1568 /* Called from the main thread (and also from the IO thread while the main
1569 * thread is waiting).
1571 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1572 * set. Instead, flat volume mode is detected by checking whether the root sink
1573 * has the flag set. */
1574 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1575 pa_sink_assert_ref(s);
/* Resolve the root of the volume-sharing chain first. */
1577 s = pa_sink_get_master(s);
/* NOTE(review): a NULL-master fallback branch appears to be elided
 * between these lines — confirm against the full file. */
1580 return (s->flags & PA_SINK_FLAT_VOLUME);
1585 /* Called from the main thread (and also from the IO thread while the main
1586 * thread is waiting). */
/* Walk up the volume-sharing chain (SHARE_VOLUME_WITH_MASTER) via each
 * filter sink's input_to_master link and return the root sink. The
 * return statements are elided in this view. */
1587 pa_sink *pa_sink_get_master(pa_sink *s) {
1588 pa_sink_assert_ref(s);
1590 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* A sharing sink with no master link is a broken chain — bail out. */
1591 if (PA_UNLIKELY(!s->input_to_master))
1594 s = s->input_to_master->sink;
1600 /* Called from main context */
/* A sink is a "filter" sink when it forwards its data into another sink
 * via an input_to_master sink input. */
1601 bool pa_sink_is_filter(pa_sink *s) {
1602 pa_sink_assert_ref(s);
1604 return (s->input_to_master != NULL);
1607 /* Called from main context */
/* True iff the sink currently carries a passthrough stream. Since a
 * passthrough input must be the only input, only the single-input case
 * needs checking. */
1608 bool pa_sink_is_passthrough(pa_sink *s) {
1609 pa_sink_input *alt_i;
1612 pa_sink_assert_ref(s);
1614 /* one and only one PASSTHROUGH input can possibly be connected */
1615 if (pa_idxset_size(s->inputs) == 1) {
1616 alt_i = pa_idxset_first(s->inputs, &idx);
1618 if (pa_sink_input_is_passthrough(alt_i))
1625 /* Called from main context */
/* Switch the sink into passthrough mode: suspend the monitor, save the
 * current volume and force it to (at most) 0 dB, then bounce the sink
 * through suspend/resume so it reopens in passthrough mode. */
1626 void pa_sink_enter_passthrough(pa_sink *s) {
/* Idempotent: entering twice is a no-op. */
1629 if (s->is_passthrough_set) {
1630 pa_log_debug("Sink %s is already in passthrough mode, nothing to do", s->name);
1634 /* disable the monitor in passthrough mode */
1635 if (s->monitor_source) {
1636 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1637 pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1640 /* set the volume to NORM */
/* Remember the pre-passthrough volume so leave_passthrough() can
 * restore it. */
1641 s->saved_volume = *pa_sink_get_volume(s, true);
1642 s->saved_save_volume = s->save_volume;
1644 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1645 pa_sink_set_volume(s, &volume, true, false);
1647 pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
1649 /* force sink to be resumed in passthrough mode */
1650 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1651 s->is_passthrough_set = true;
1652 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1655 /* Called from main context */
/* Undo pa_sink_enter_passthrough(): resume the monitor, restore the
 * saved volume, and bounce the sink through suspend/resume so it
 * reopens in normal (non-passthrough) mode. */
1656 void pa_sink_leave_passthrough(pa_sink *s) {
/* Idempotent: leaving when not in passthrough mode is a no-op. */
1658 if (!s->is_passthrough_set) {
1659 pa_log_debug("Sink %s is not in passthrough mode, nothing to do", s->name);
1663 /* Unsuspend monitor */
1664 if (s->monitor_source) {
1665 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1666 pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1669 /* Restore sink volume to what it was before we entered passthrough mode */
1670 pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
/* Clear the saved state so it cannot be restored twice. */
1672 pa_cvolume_init(&s->saved_volume);
1673 s->saved_save_volume = false;
1675 /* force sink to be resumed in non-passthrough mode */
1676 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1677 s->is_passthrough_set = false;
1678 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1682 /* Called from main context. */
/* Recompute one sink input's reference ratio, per channel, as
 * i->volume / i->sink->reference_volume (in the input's channel map).
 * Channels where the sink volume is muted, or where the existing ratio
 * already reproduces i->volume exactly, are left untouched. */
1683 static void compute_reference_ratio(pa_sink_input *i) {
1685 pa_cvolume remapped;
1689 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1692 * Calculates the reference ratio from the sink's reference
1693 * volume. This basically calculates:
1695 * i->reference_ratio = i->volume / i->sink->reference_volume
/* Remap the sink's reference volume into the input's channel map so
 * the per-channel division below lines up. */
1698 remapped = i->sink->reference_volume;
1699 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1701 ratio = i->reference_ratio;
1703 for (c = 0; c < i->sample_spec.channels; c++) {
1705 /* We don't update when the sink volume is 0 anyway */
1706 if (remapped.values[c] <= PA_VOLUME_MUTED)
1709 /* Don't update the reference ratio unless necessary */
1710 if (pa_sw_volume_multiply(
1712 remapped.values[c]) == i->volume.values[c])
1715 ratio.values[c] = pa_sw_volume_divide(
1716 i->volume.values[c],
1717 remapped.values[c]);
1720 pa_sink_input_set_reference_ratio(i, &ratio);
1723 /* Called from main context. Only called for the root sink in volume sharing
1724 * cases, except for internal recursive calls. */
/* Recompute reference ratios for every input of this sink, recursing
 * into linked filter sinks that share volume with their master. */
1725 static void compute_reference_ratios(pa_sink *s) {
1729 pa_sink_assert_ref(s);
1730 pa_assert_ctl_context();
1731 pa_assert(PA_SINK_IS_LINKED(s->state));
1732 pa_assert(pa_sink_flat_volume_enabled(s));
1734 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1735 compute_reference_ratio(i);
/* Descend into volume-sharing filter sinks hanging off this input. */
1737 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1738 && PA_SINK_IS_LINKED(i->origin_sink->state))
1739 compute_reference_ratios(i->origin_sink);
1743 /* Called from main context. Only called for the root sink in volume sharing
1744 * cases, except for internal recursive calls. */
/* Recompute, per input: i->real_ratio = i->volume / s->real_volume and
 * i->soft_volume = i->real_ratio * i->volume_factor. Inputs feeding a
 * volume-sharing filter sink get a 0 dB real ratio instead, and the
 * recursion continues into that filter sink. */
1745 static void compute_real_ratios(pa_sink *s) {
1749 pa_sink_assert_ref(s);
1750 pa_assert_ctl_context();
1751 pa_assert(PA_SINK_IS_LINKED(s->state));
1752 pa_assert(pa_sink_flat_volume_enabled(s));
1754 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1756 pa_cvolume remapped;
1758 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1759 /* The origin sink uses volume sharing, so this input's real ratio
1760 * is handled as a special case - the real ratio must be 0 dB, and
1761 * as a result i->soft_volume must equal i->volume_factor. */
1762 pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1763 i->soft_volume = i->volume_factor;
1765 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1766 compute_real_ratios(i->origin_sink);
1772 * This basically calculates:
1774 * i->real_ratio := i->volume / s->real_volume
1775 * i->soft_volume := i->real_ratio * i->volume_factor
/* Remap the sink's real volume into the input's channel map for the
 * per-channel arithmetic below. */
1778 remapped = s->real_volume;
1779 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1781 i->real_ratio.channels = i->sample_spec.channels;
1782 i->soft_volume.channels = i->sample_spec.channels;
1784 for (c = 0; c < i->sample_spec.channels; c++) {
/* A muted sink channel forces the soft volume to mute; the real
 * ratio is deliberately left as-is (division by 0 is undefined). */
1786 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1787 /* We leave i->real_ratio untouched */
1788 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1792 /* Don't lose accuracy unless necessary */
1793 if (pa_sw_volume_multiply(
1794 i->real_ratio.values[c],
1795 remapped.values[c]) != i->volume.values[c])
1797 i->real_ratio.values[c] = pa_sw_volume_divide(
1798 i->volume.values[c],
1799 remapped.values[c]);
1801 i->soft_volume.values[c] = pa_sw_volume_multiply(
1802 i->real_ratio.values[c],
1803 i->volume_factor.values[c]);
1806 /* We don't copy the soft_volume to the thread_info data
1807 * here. That must be done by the caller */
/* Remap *v from channel map `from` to `to`, preferring `template` when
 * it is already a valid remapping of v; otherwise fall back to an
 * all-channel volume (max of v). Returns v. See the long comment below
 * for the rationale. */
1811 static pa_cvolume *cvolume_remap_minimal_impact(
1813 const pa_cvolume *template,
1814 const pa_channel_map *from,
1815 const pa_channel_map *to) {
1820 pa_assert(template);
1823 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1824 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1826 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1827 * mapping from sink input to sink volumes:
1829 * If template is a possible remapping from v it is used instead
1830 * of remapping anew.
1832 * If the channel maps don't match we set an all-channel volume on
1833 * the sink to ensure that changing a volume on one stream has no
1834 * effect that cannot be compensated for in another stream that
1835 * does not have the same channel map as the sink. */
/* Identical maps: nothing to remap (early-out body elided here). */
1837 if (pa_channel_map_equal(from, to))
/* If remapping the template back to `from` reproduces v, the template
 * is an exact remapping — reuse it. */
1841 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
/* Fallback: flatten to a uniform volume across all target channels. */
1846 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1850 /* Called from main thread. Only called for the root sink in volume sharing
1851 * cases, except for internal recursive calls. */
/* Merge the per-channel maximum of all connected inputs' volumes into
 * *max_volume (expressed in *channel_map), recursing through
 * volume-sharing filter sinks. */
1852 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1856 pa_sink_assert_ref(s);
1857 pa_assert(max_volume);
1858 pa_assert(channel_map);
1859 pa_assert(pa_sink_flat_volume_enabled(s));
1861 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1862 pa_cvolume remapped;
1864 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1865 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1866 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1868 /* Ignore this input. The origin sink uses volume sharing, so this
1869 * input's volume will be set to be equal to the root sink's real
1870 * volume. Obviously this input's current volume must not then
1871 * affect what the root sink's real volume will be. */
/* Remap with minimal impact so one stream's volume change stays
 * compensatable in streams with differing channel maps. */
1875 remapped = i->volume;
1876 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1877 pa_cvolume_merge(max_volume, max_volume, &remapped);
1881 /* Called from main thread. Only called for the root sink in volume sharing
1882 * cases, except for internal recursive calls. */
/* True iff this sink (or any volume-sharing filter sink below it) has
 * at least one "real" input — i.e. one that is not merely the uplink of
 * an empty volume-sharing filter sink. */
1883 static bool has_inputs(pa_sink *s) {
1887 pa_sink_assert_ref(s);
1889 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1890 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1897 /* Called from main thread. Only called for the root sink in volume sharing
1898 * cases, except for internal recursive calls. */
/* Set s->real_volume to *new_volume (remapped into the sink's channel
 * map) and propagate it down the volume-sharing tree: sharing inputs
 * follow the root's real volume directly, and their origin sinks are
 * updated recursively. */
1899 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1903 pa_sink_assert_ref(s);
1904 pa_assert(new_volume);
1905 pa_assert(channel_map);
1907 s->real_volume = *new_volume;
1908 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1910 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1911 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1912 if (pa_sink_flat_volume_enabled(s)) {
1913 pa_cvolume new_input_volume;
1915 /* Follow the root sink's real volume. */
1916 new_input_volume = *new_volume;
1917 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
1918 pa_sink_input_set_volume_direct(i, &new_input_volume);
/* Keep the reference ratio consistent with the new volume. */
1919 compute_reference_ratio(i);
1922 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1923 update_real_volume(i->origin_sink, new_volume, channel_map);
1928 /* Called from main thread. Only called for the root sink in shared volume
/* Derive s->real_volume from the streams: it becomes the per-channel
 * maximum of all input volumes (flat volume model), after which every
 * input's real ratio / soft volume is recomputed. With no inputs, the
 * reference volume is used unchanged. */
1930 static void compute_real_volume(pa_sink *s) {
1931 pa_sink_assert_ref(s);
1932 pa_assert_ctl_context();
1933 pa_assert(PA_SINK_IS_LINKED(s->state));
1934 pa_assert(pa_sink_flat_volume_enabled(s));
1935 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
1937 /* This determines the maximum volume of all streams and sets
1938 * s->real_volume accordingly. */
1940 if (!has_inputs(s)) {
1941 /* In the special case that we have no sink inputs we leave the
1942 * volume unmodified. */
1943 update_real_volume(s, &s->reference_volume, &s->channel_map);
/* Start from silence so the merge below is a pure maximum. */
1947 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1949 /* First let's determine the new maximum volume of all inputs
1950 * connected to this sink */
1951 get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
1952 update_real_volume(s, &s->real_volume, &s->channel_map);
1954 /* Then, let's update the real ratios/soft volumes of all inputs
1955 * connected to this sink */
1956 compute_real_ratios(s);
1959 /* Called from main thread. Only called for the root sink in shared volume
1960 * cases, except for internal recursive calls. */
/* After a sink-initiated reference-volume change, push the change back
 * onto the streams: i->volume := s->reference_volume *
 * i->reference_ratio, recursing through volume-sharing filter sinks. */
1961 static void propagate_reference_volume(pa_sink *s) {
1965 pa_sink_assert_ref(s);
1966 pa_assert_ctl_context();
1967 pa_assert(PA_SINK_IS_LINKED(s->state));
1968 pa_assert(pa_sink_flat_volume_enabled(s));
1970 /* This is called whenever the sink volume changes that is not
1971 * caused by a sink input volume change. We need to fix up the
1972 * sink input volumes accordingly */
1974 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1975 pa_cvolume new_volume;
1977 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1978 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1979 propagate_reference_volume(i->origin_sink);
1981 /* Since the origin sink uses volume sharing, this input's volume
1982 * needs to be updated to match the root sink's real volume, but
1983 * that will be done later in update_real_volume(). */
1987 /* This basically calculates:
1989 * i->volume := s->reference_volume * i->reference_ratio */
1991 new_volume = s->reference_volume;
1992 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
1993 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
1994 pa_sink_input_set_volume_direct(i, &new_volume);
1998 /* Called from main thread. Only called for the root sink in volume sharing
1999 * cases, except for internal recursive calls. The return value indicates
2000 * whether any reference volume actually changed. */
/* Install *v (remapped into the sink's channel map) as the sink's
 * reference volume, update the save flag, and propagate the same value
 * down the volume-sharing tree. */
2001 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
2003 bool reference_volume_changed;
2007 pa_sink_assert_ref(s);
2008 pa_assert(PA_SINK_IS_LINKED(s->state));
2010 pa_assert(channel_map);
2011 pa_assert(pa_cvolume_valid(v));
2014 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
2016 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
2017 pa_sink_set_reference_volume_direct(s, &volume);
/* Keep the previously-saved state only when the volume is unchanged;
 * otherwise the caller's `save` decides. */
2019 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
2021 if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2022 /* If the root sink's volume doesn't change, then there can't be any
2023 * changes in the other sinks in the sink tree either.
2025 * It's probably theoretically possible that even if the root sink's
2026 * volume changes slightly, some filter sink doesn't change its volume
2027 * due to rounding errors. If that happens, we still want to propagate
2028 * the changed root sink volume to the sinks connected to the
2029 * intermediate sink that didn't change its volume. This theoretical
2030 * possibility is the reason why we have that !(s->flags &
2031 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2032 * notice even if we returned here false always if
2033 * reference_volume_changed is false. */
/* Recurse into volume-sharing filter sinks; their own save flag is
 * never set by this propagation (save=false). */
2036 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2037 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2038 && PA_SINK_IS_LINKED(i->origin_sink->state))
2039 update_reference_volume(i->origin_sink, v, channel_map, false);
2045 /* Called from main thread */
/* Public volume setter. volume == NULL means "re-sync the sink's
 * reference/real volumes from the current stream volumes" (only valid
 * in flat-volume mode). Otherwise the given volume (mono volumes are
 * accepted for any channel map) is installed on the root sink of the
 * volume-sharing tree and propagated from there.
 * NOTE(review): interior lines are elided in this view. */
2046 void pa_sink_set_volume(
2048 const pa_cvolume *volume,
2052 pa_cvolume new_reference_volume;
2055 pa_sink_assert_ref(s);
2056 pa_assert_ctl_context();
2057 pa_assert(PA_SINK_IS_LINKED(s->state));
2058 pa_assert(!volume || pa_cvolume_valid(volume));
2059 pa_assert(volume || pa_sink_flat_volume_enabled(s));
2060 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2062 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2063 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2064 if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2065 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
2069 /* In case of volume sharing, the volume is set for the root sink first,
2070 * from which it's then propagated to the sharing sinks. */
2071 root_sink = pa_sink_get_master(s);
2073 if (PA_UNLIKELY(!root_sink))
2076 /* As a special exception we accept mono volumes on all sinks --
2077 * even on those with more complex channel maps */
/* Expand a mono volume by scaling the current reference volume. */
2080 if (pa_cvolume_compatible(volume, &s->sample_spec))
2081 new_reference_volume = *volume;
2083 new_reference_volume = s->reference_volume;
2084 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2087 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2089 if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2090 if (pa_sink_flat_volume_enabled(root_sink)) {
2091 /* OK, propagate this volume change back to the inputs */
2092 propagate_reference_volume(root_sink);
2094 /* And now recalculate the real volume */
2095 compute_real_volume(root_sink);
2097 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2101 /* If volume is NULL we synchronize the sink's real and
2102 * reference volumes with the stream volumes. */
2104 pa_assert(pa_sink_flat_volume_enabled(root_sink));
2106 /* Ok, let's determine the new real volume */
2107 compute_real_volume(root_sink);
2109 /* Let's 'push' the reference volume if necessary */
2110 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2111 /* If the sink and its root don't have the same number of channels, we need to remap */
2112 if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2113 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2114 update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2116 /* Now that the reference volume is updated, we can update the streams'
2117 * reference ratios. */
2118 compute_reference_ratios(root_sink);
2121 if (root_sink->set_volume) {
2122 /* If we have a function set_volume(), then we do not apply a
2123 * soft volume by default. However, set_volume() is free to
2124 * apply one to root_sink->soft_volume */
2126 pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
/* Deferred-volume sinks apply the HW volume from the IO thread
 * instead of calling set_volume() here. */
2127 if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2128 root_sink->set_volume(root_sink);
2131 /* If we have no function set_volume(), then the soft volume
2132 * becomes the real volume */
2133 root_sink->soft_volume = root_sink->real_volume;
2135 /* This tells the sink that soft volume and/or real volume changed */
2137 pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2140 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2141 * Only to be called by sink implementor */
/* Install a new soft (software-applied) volume. A NULL volume resets it
 * to 0 dB (reset path partly elided in this view). For non-deferred
 * sinks the IO thread is notified synchronously; for deferred-volume
 * sinks the thread_info copy is updated directly (we are already on
 * the IO thread then). */
2142 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2144 pa_sink_assert_ref(s);
2145 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2147 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2148 pa_sink_assert_io_context(s);
2150 pa_assert_ctl_context();
2153 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2155 s->soft_volume = *volume;
2157 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2158 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0)
2160 s->thread_info.soft_volume = s->soft_volume;
2163 /* Called from the main thread. Only called for the root sink in volume sharing
2164 * cases, except for internal recursive calls. */
/* React to a hardware-initiated volume change: adopt s->real_volume as
 * the new reference volume, then rebuild stream volumes from the
 * (unchanged) real ratios, recursing through sharing filter sinks. */
2165 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2169 pa_sink_assert_ref(s);
2170 pa_assert(old_real_volume);
2171 pa_assert_ctl_context();
2172 pa_assert(PA_SINK_IS_LINKED(s->state));
2174 /* This is called when the hardware's real volume changes due to
2175 * some external event. We copy the real volume into our
2176 * reference volume and then rebuild the stream volumes based on
2177 * i->real_ratio which should stay fixed. */
2179 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* Nothing changed — nothing to propagate. */
2180 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2183 /* 1. Make the real volume the reference volume */
2184 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2187 if (pa_sink_flat_volume_enabled(s)) {
2189 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2190 pa_cvolume new_volume;
2192 /* 2. Since the sink's reference and real volumes are equal
2193 * now our ratios should be too. */
2194 pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2196 /* 3. Recalculate the new stream reference volume based on the
2197 * reference ratio and the sink's reference volume.
2199 * This basically calculates:
2201 * i->volume = s->reference_volume * i->reference_ratio
2203 * This is identical to propagate_reference_volume() */
2204 new_volume = s->reference_volume;
2205 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2206 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2207 pa_sink_input_set_volume_direct(i, &new_volume);
2209 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2210 && PA_SINK_IS_LINKED(i->origin_sink->state))
2211 propagate_real_volume(i->origin_sink, old_real_volume);
2215 /* Something got changed in the hardware. It probably makes sense
2216 * to save changed hw settings given that hw volume changes not
2217 * triggered by PA are almost certainly done by the user. */
2218 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2219 s->save_volume = true;
2222 /* Called from io thread */
/* Ask the main thread (via the outbound message queue) to refresh this
 * sink's volume and mute state; fire-and-forget, no reply expected. */
2223 void pa_sink_update_volume_and_mute(pa_sink *s) {
2225 pa_sink_assert_io_context(s);
2227 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2230 /* Called from main thread */
/* Return the sink's reference volume, optionally refreshing it from the
 * hardware first (always refreshed when s->refresh_volume is set). Any
 * externally-changed real volume is propagated to the streams. */
2231 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
2232 pa_sink_assert_ref(s);
2233 pa_assert_ctl_context();
2234 pa_assert(PA_SINK_IS_LINKED(s->state));
2236 if (s->refresh_volume || force_refresh) {
2237 struct pa_cvolume old_real_volume;
2239 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2241 old_real_volume = s->real_volume;
/* Non-deferred sinks read the HW volume here; deferred sinks go
 * through the IO thread message below. */
2243 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2246 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2248 update_real_volume(s, &s->real_volume, &s->channel_map);
2249 propagate_real_volume(s, &old_real_volume);
2252 return &s->reference_volume;
2255 /* Called from main thread. In volume sharing cases, only the root sink may
/* Sink implementors call this after the hardware volume changed behind
 * PulseAudio's back; it installs the new real volume and propagates the
 * change to reference volume and streams. */
2257 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2258 pa_cvolume old_real_volume;
2260 pa_sink_assert_ref(s);
2261 pa_assert_ctl_context();
2262 pa_assert(PA_SINK_IS_LINKED(s->state));
2263 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2265 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2267 old_real_volume = s->real_volume;
2268 update_real_volume(s, new_real_volume, &s->channel_map);
2269 propagate_real_volume(s, &old_real_volume);
2272 /* Called from main thread */
2273 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2276 pa_sink_assert_ref(s);
2277 pa_assert_ctl_context();
2279 old_muted = s->muted;
2281 if (mute == old_muted) {
2282 s->save_muted |= save;
2287 s->save_muted = save;
2289 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
2290 s->set_mute_in_progress = true;
2292 s->set_mute_in_progress = false;
2295 if (!PA_SINK_IS_LINKED(s->state))
2298 pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
2299 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2300 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2301 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2304 /* Called from main thread */
2305 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2307 pa_sink_assert_ref(s);
2308 pa_assert_ctl_context();
2309 pa_assert(PA_SINK_IS_LINKED(s->state));
2311 if ((s->refresh_muted || force_refresh) && s->get_mute) {
2314 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2315 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2316 pa_sink_mute_changed(s, mute);
2318 if (s->get_mute(s, &mute) >= 0)
2319 pa_sink_mute_changed(s, mute);
2326 /* Called from main thread */
2327 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2328 pa_sink_assert_ref(s);
2329 pa_assert_ctl_context();
2330 pa_assert(PA_SINK_IS_LINKED(s->state));
2332 if (s->set_mute_in_progress)
2335 /* pa_sink_set_mute() does this same check, so this may appear redundant,
2336 * but we must have this here also, because the save parameter of
2337 * pa_sink_set_mute() would otherwise have unintended side effects (saving
2338 * the mute state when it shouldn't be saved). */
2339 if (new_muted == s->muted)
2342 pa_sink_set_mute(s, new_muted, true);
2345 /* Called from main thread */
2346 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2347 pa_sink_assert_ref(s);
2348 pa_assert_ctl_context();
2351 pa_proplist_update(s->proplist, mode, p);
2353 if (PA_SINK_IS_LINKED(s->state)) {
2354 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2355 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2361 /* Called from main thread */
2362 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
2363 void pa_sink_set_description(pa_sink *s, const char *description) {
2365 pa_sink_assert_ref(s);
2366 pa_assert_ctl_context();
2368 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2371 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2373 if (old && description && pa_streq(old, description))
2377 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2379 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2381 if (s->monitor_source) {
2384 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2385 pa_source_set_description(s->monitor_source, n);
2389 if (PA_SINK_IS_LINKED(s->state)) {
2390 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2391 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2395 /* Called from main thread */
2396 unsigned pa_sink_linked_by(pa_sink *s) {
2399 pa_sink_assert_ref(s);
2400 pa_assert_ctl_context();
2401 pa_assert(PA_SINK_IS_LINKED(s->state));
2403 ret = pa_idxset_size(s->inputs);
2405 /* We add in the number of streams connected to us here. Please
2406 * note the asymmetry to pa_sink_used_by()! */
2408 if (s->monitor_source)
2409 ret += pa_source_linked_by(s->monitor_source);
2414 /* Called from main thread */
2415 unsigned pa_sink_used_by(pa_sink *s) {
2418 pa_sink_assert_ref(s);
2419 pa_assert_ctl_context();
2420 pa_assert(PA_SINK_IS_LINKED(s->state));
2422 ret = pa_idxset_size(s->inputs);
2423 pa_assert(ret >= s->n_corked);
2425 /* Streams connected to our monitor source do not matter for
2426 * pa_sink_used_by()!.*/
2428 return ret - s->n_corked;
2431 /* Called from main thread */
2432 unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
2437 pa_sink_assert_ref(s);
2438 pa_assert_ctl_context();
2440 if (!PA_SINK_IS_LINKED(s->state))
2445 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2446 pa_sink_input_state_t st;
2448 if (i == ignore_input)
2451 st = pa_sink_input_get_state(i);
2453 /* We do not assert here. It is perfectly valid for a sink input to
2454 * be in the INIT state (i.e. created, marked done but not yet put)
2455 * and we should not care if it's unlinked as it won't contribute
2456 * towards our busy status.
2458 if (!PA_SINK_INPUT_IS_LINKED(st))
2461 if (st == PA_SINK_INPUT_CORKED)
2464 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2470 if (s->monitor_source)
2471 ret += pa_source_check_suspend(s->monitor_source, ignore_output);
2476 /* Called from the IO thread */
2477 static void sync_input_volumes_within_thread(pa_sink *s) {
2481 pa_sink_assert_ref(s);
2482 pa_sink_assert_io_context(s);
2484 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2485 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2488 i->thread_info.soft_volume = i->soft_volume;
2489 pa_sink_input_request_rewind(i, 0, true, false, false);
2493 /* Called from the IO thread. Only called for the root sink in volume sharing
2494 * cases, except for internal recursive calls. */
2495 static void set_shared_volume_within_thread(pa_sink *s) {
2496 pa_sink_input *i = NULL;
2499 pa_sink_assert_ref(s);
2501 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2503 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2504 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2505 set_shared_volume_within_thread(i->origin_sink);
2509 /* Called from IO thread, except when it is not */
2510 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2511 pa_sink *s = PA_SINK(o);
2512 pa_sink_assert_ref(s);
2514 switch ((pa_sink_message_t) code) {
2516 case PA_SINK_MESSAGE_ADD_INPUT: {
2517 pa_sink_input *i = PA_SINK_INPUT(userdata);
2519 /* If you change anything here, make sure to change the
2520 * sink input handling a few lines down at
2521 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2523 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2525 /* Since the caller sleeps in pa_sink_input_put(), we can
2526 * safely access data outside of thread_info even though
2529 if ((i->thread_info.sync_prev = i->sync_prev)) {
2530 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2531 pa_assert(i->sync_prev->sync_next == i);
2532 i->thread_info.sync_prev->thread_info.sync_next = i;
2535 if ((i->thread_info.sync_next = i->sync_next)) {
2536 pa_assert(i->sink == i->thread_info.sync_next->sink);
2537 pa_assert(i->sync_next->sync_prev == i);
2538 i->thread_info.sync_next->thread_info.sync_prev = i;
2541 pa_sink_input_attach(i);
2543 pa_sink_input_set_state_within_thread(i, i->state);
2545 /* The requested latency of the sink input needs to be fixed up and
2546 * then configured on the sink. If this causes the sink latency to
2547 * go down, the sink implementor is responsible for doing a rewind
2548 * in the update_requested_latency() callback to ensure that the
2549 * sink buffer doesn't contain more data than what the new latency
2552 * XXX: Does it really make sense to push this responsibility to
2553 * the sink implementors? Wouldn't it be better to do it once in
2554 * the core than many times in the modules? */
2556 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2557 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2559 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2560 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2562 /* We don't rewind here automatically. This is left to the
2563 * sink input implementor because some sink inputs need a
2564 * slow start, i.e. need some time to buffer client
2565 * samples before beginning streaming.
2567 * XXX: Does it really make sense to push this functionality to
2568 * the sink implementors? Wouldn't it be better to do it once in
2569 * the core than many times in the modules? */
2571 /* In flat volume mode we need to update the volume as
2573 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2576 case PA_SINK_MESSAGE_REMOVE_INPUT: {
2577 pa_sink_input *i = PA_SINK_INPUT(userdata);
2579 /* If you change anything here, make sure to change the
2580 * sink input handling a few lines down at
2581 * PA_SINK_MESSAGE_START_MOVE, too. */
2583 pa_sink_input_detach(i);
2585 pa_sink_input_set_state_within_thread(i, i->state);
2587 /* Since the caller sleeps in pa_sink_input_unlink(),
2588 * we can safely access data outside of thread_info even
2589 * though it is mutable */
2591 pa_assert(!i->sync_prev);
2592 pa_assert(!i->sync_next);
2594 if (i->thread_info.sync_prev) {
2595 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2596 i->thread_info.sync_prev = NULL;
2599 if (i->thread_info.sync_next) {
2600 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2601 i->thread_info.sync_next = NULL;
2604 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2605 pa_sink_invalidate_requested_latency(s, true);
2606 pa_sink_request_rewind(s, (size_t) -1);
2608 /* In flat volume mode we need to update the volume as
2610 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2613 case PA_SINK_MESSAGE_START_MOVE: {
2614 pa_sink_input *i = PA_SINK_INPUT(userdata);
2616 /* We don't support moving synchronized streams. */
2617 pa_assert(!i->sync_prev);
2618 pa_assert(!i->sync_next);
2619 pa_assert(!i->thread_info.sync_next);
2620 pa_assert(!i->thread_info.sync_prev);
2622 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2624 size_t sink_nbytes, total_nbytes;
2626 /* The old sink probably has some audio from this
2627 * stream in its buffer. We want to "take it back" as
2628 * much as possible and play it to the new sink. We
2629 * don't know at this point how much the old sink can
2630 * rewind. We have to pick something, and that
2631 * something is the full latency of the old sink here.
2632 * So we rewind the stream buffer by the sink latency
2633 * amount, which may be more than what we should
2634 * rewind. This can result in a chunk of audio being
2635 * played both to the old sink and the new sink.
2637 * FIXME: Fix this code so that we don't have to make
2638 * guesses about how much the sink will actually be
2639 * able to rewind. If someone comes up with a solution
2640 * for this, something to note is that the part of the
2641 * latency that the old sink couldn't rewind should
2642 * ideally be compensated after the stream has moved
2643 * to the new sink by adding silence. The new sink
2644 * most likely can't start playing the moved stream
2645 * immediately, and that gap should be removed from
2646 * the "compensation silence" (at least at the time of
2647 * writing this, the move finish code will actually
2648 * already take care of dropping the new sink's
2649 * unrewindable latency, so taking into account the
2650 * unrewindable latency of the old sink is the only
2653 * The render_memblockq contents are discarded,
2654 * because when the sink changes, the format of the
2655 * audio stored in the render_memblockq may change
2656 * too, making the stored audio invalid. FIXME:
2657 * However, the read and write indices are moved back
2658 * the same amount, so if they are not the same now,
2659 * they won't be the same after the rewind either. If
2660 * the write index of the render_memblockq is ahead of
2661 * the read index, then the render_memblockq will feed
2662 * the new sink some silence first, which it shouldn't
2663 * do. The write index should be flushed to be the
2664 * same as the read index. */
2666 /* Get the latency of the sink */
2667 usec = pa_sink_get_latency_within_thread(s, false);
2668 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2669 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
2671 if (total_nbytes > 0) {
2672 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
2673 i->thread_info.rewrite_flush = true;
2674 pa_sink_input_process_rewind(i, sink_nbytes);
2678 pa_sink_input_detach(i);
2680 /* Let's remove the sink input ...*/
2681 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2683 pa_sink_invalidate_requested_latency(s, true);
2685 pa_log_debug("Requesting rewind due to started move");
2686 pa_sink_request_rewind(s, (size_t) -1);
2688 /* In flat volume mode we need to update the volume as
2690 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2693 case PA_SINK_MESSAGE_FINISH_MOVE: {
2694 pa_sink_input *i = PA_SINK_INPUT(userdata);
2696 /* We don't support moving synchronized streams. */
2697 pa_assert(!i->sync_prev);
2698 pa_assert(!i->sync_next);
2699 pa_assert(!i->thread_info.sync_next);
2700 pa_assert(!i->thread_info.sync_prev);
2702 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2704 pa_sink_input_attach(i);
2706 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2710 /* In the ideal case the new sink would start playing
2711 * the stream immediately. That requires the sink to
2712 * be able to rewind all of its latency, which usually
2713 * isn't possible, so there will probably be some gap
2714 * before the moved stream becomes audible. We then
2715 * have two possibilities: 1) start playing the stream
2716 * from where it is now, or 2) drop the unrewindable
2717 * latency of the sink from the stream. With option 1
2718 * we won't lose any audio but the stream will have a
2719 * pause. With option 2 we may lose some audio but the
2720 * stream time will be somewhat in sync with the wall
2721 * clock. Lennart seems to have chosen option 2 (one
2722 * of the reasons might have been that option 1 is
2723 * actually much harder to implement), so we drop the
2724 * latency of the new sink from the moved stream and
2725 * hope that the sink will undo most of that in the
2728 /* Get the latency of the sink */
2729 usec = pa_sink_get_latency_within_thread(s, false);
2730 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2733 pa_sink_input_drop(i, nbytes);
2735 pa_log_debug("Requesting rewind due to finished move");
2736 pa_sink_request_rewind(s, nbytes);
2739 /* Updating the requested sink latency has to be done
2740 * after the sink rewind request, not before, because
2741 * otherwise the sink may limit the rewind amount
2744 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2745 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2747 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2748 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2750 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2753 case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
2754 pa_sink *root_sink = pa_sink_get_master(s);
2756 if (PA_LIKELY(root_sink))
2757 set_shared_volume_within_thread(root_sink);
2762 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2764 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2766 pa_sink_volume_change_push(s);
2768 /* Fall through ... */
2770 case PA_SINK_MESSAGE_SET_VOLUME:
2772 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2773 s->thread_info.soft_volume = s->soft_volume;
2774 pa_sink_request_rewind(s, (size_t) -1);
2777 /* Fall through ... */
2779 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2780 sync_input_volumes_within_thread(s);
2783 case PA_SINK_MESSAGE_GET_VOLUME:
2785 if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
2787 pa_sink_volume_change_flush(s);
2788 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2791 /* In case sink implementor reset SW volume. */
2792 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2793 s->thread_info.soft_volume = s->soft_volume;
2794 pa_sink_request_rewind(s, (size_t) -1);
2799 case PA_SINK_MESSAGE_SET_MUTE:
2801 if (s->thread_info.soft_muted != s->muted) {
2802 s->thread_info.soft_muted = s->muted;
2803 pa_sink_request_rewind(s, (size_t) -1);
2806 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2811 case PA_SINK_MESSAGE_GET_MUTE:
2813 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2814 return s->get_mute(s, userdata);
2818 case PA_SINK_MESSAGE_SET_STATE: {
2820 bool suspend_change =
2821 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2822 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2824 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2826 if (s->thread_info.state == PA_SINK_SUSPENDED) {
2827 s->thread_info.rewind_nbytes = 0;
2828 s->thread_info.rewind_requested = false;
2831 if (suspend_change) {
2835 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2836 if (i->suspend_within_thread)
2837 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2843 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2845 pa_usec_t *usec = userdata;
2846 *usec = pa_sink_get_requested_latency_within_thread(s);
2848 /* Yes, that's right, the IO thread will see -1 when no
2849 * explicit requested latency is configured, the main
2850 * thread will see max_latency */
2851 if (*usec == (pa_usec_t) -1)
2852 *usec = s->thread_info.max_latency;
2857 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2858 pa_usec_t *r = userdata;
2860 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2865 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2866 pa_usec_t *r = userdata;
2868 r[0] = s->thread_info.min_latency;
2869 r[1] = s->thread_info.max_latency;
2874 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2876 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2879 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2881 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2884 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2886 *((size_t*) userdata) = s->thread_info.max_rewind;
2889 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2891 *((size_t*) userdata) = s->thread_info.max_request;
2894 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2896 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2899 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2901 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2904 case PA_SINK_MESSAGE_SET_PORT:
2906 pa_assert(userdata);
2908 struct sink_message_set_port *msg_data = userdata;
2909 msg_data->ret = s->set_port(s, msg_data->port);
2913 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2914 /* This message is sent from IO-thread and handled in main thread. */
2915 pa_assert_ctl_context();
2917 /* Make sure we're not messing with main thread when no longer linked */
2918 if (!PA_SINK_IS_LINKED(s->state))
2921 pa_sink_get_volume(s, true);
2922 pa_sink_get_mute(s, true);
2925 case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
2926 s->thread_info.port_latency_offset = offset;
2929 case PA_SINK_MESSAGE_GET_LATENCY:
2930 case PA_SINK_MESSAGE_MAX:
2937 /* Called from main thread */
2938 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
2943 pa_core_assert_ref(c);
2944 pa_assert_ctl_context();
2945 pa_assert(cause != 0);
2947 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2950 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2957 /* Called from IO thread */
2958 void pa_sink_detach_within_thread(pa_sink *s) {
2962 pa_sink_assert_ref(s);
2963 pa_sink_assert_io_context(s);
2964 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2966 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2967 pa_sink_input_detach(i);
2969 if (s->monitor_source)
2970 pa_source_detach_within_thread(s->monitor_source);
2973 /* Called from IO thread */
2974 void pa_sink_attach_within_thread(pa_sink *s) {
2978 pa_sink_assert_ref(s);
2979 pa_sink_assert_io_context(s);
2980 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2982 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2983 pa_sink_input_attach(i);
2985 if (s->monitor_source)
2986 pa_source_attach_within_thread(s->monitor_source);
2989 /* Called from IO thread */
2990 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2991 pa_sink_assert_ref(s);
2992 pa_sink_assert_io_context(s);
2993 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2995 if (nbytes == (size_t) -1)
2996 nbytes = s->thread_info.max_rewind;
2998 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
3000 if (s->thread_info.rewind_requested &&
3001 nbytes <= s->thread_info.rewind_nbytes)
3004 s->thread_info.rewind_nbytes = nbytes;
3005 s->thread_info.rewind_requested = true;
3007 if (s->request_rewind)
3008 s->request_rewind(s);
3011 /* Called from IO thread */
3012 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
3013 pa_usec_t result = (pa_usec_t) -1;
3016 pa_usec_t monitor_latency;
3018 pa_sink_assert_ref(s);
3019 pa_sink_assert_io_context(s);
3021 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3022 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
3024 if (s->thread_info.requested_latency_valid)
3025 return s->thread_info.requested_latency;
3027 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3028 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3029 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3030 result = i->thread_info.requested_sink_latency;
3032 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3034 if (monitor_latency != (pa_usec_t) -1 &&
3035 (result == (pa_usec_t) -1 || result > monitor_latency))
3036 result = monitor_latency;
3038 if (result != (pa_usec_t) -1)
3039 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3041 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3042 /* Only cache if properly initialized */
3043 s->thread_info.requested_latency = result;
3044 s->thread_info.requested_latency_valid = true;
3050 /* Called from main thread */
3051 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3054 pa_sink_assert_ref(s);
3055 pa_assert_ctl_context();
3056 pa_assert(PA_SINK_IS_LINKED(s->state));
3058 if (s->state == PA_SINK_SUSPENDED)
3061 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3066 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3067 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3071 pa_sink_assert_ref(s);
3072 pa_sink_assert_io_context(s);
3074 if (max_rewind == s->thread_info.max_rewind)
3077 s->thread_info.max_rewind = max_rewind;
3079 if (PA_SINK_IS_LINKED(s->thread_info.state))
3080 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3081 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3083 if (s->monitor_source)
3084 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3087 /* Called from main thread */
3088 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3089 pa_sink_assert_ref(s);
3090 pa_assert_ctl_context();
3092 if (PA_SINK_IS_LINKED(s->state))
3093 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3095 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3098 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3099 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3102 pa_sink_assert_ref(s);
3103 pa_sink_assert_io_context(s);
3105 if (max_request == s->thread_info.max_request)
3108 s->thread_info.max_request = max_request;
3110 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3113 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3114 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3118 /* Called from main thread */
3119 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3120 pa_sink_assert_ref(s);
3121 pa_assert_ctl_context();
3123 if (PA_SINK_IS_LINKED(s->state))
3124 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3126 pa_sink_set_max_request_within_thread(s, max_request);
3129 /* Called from IO thread */
3130 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
3134 pa_sink_assert_ref(s);
3135 pa_sink_assert_io_context(s);
3137 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3138 s->thread_info.requested_latency_valid = false;
3142 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3144 if (s->update_requested_latency)
3145 s->update_requested_latency(s);
3147 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3148 if (i->update_sink_requested_latency)
3149 i->update_sink_requested_latency(i);
3153 /* Called from main thread */
3154 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3155 pa_sink_assert_ref(s);
3156 pa_assert_ctl_context();
3158 /* min_latency == 0: no limit
3159 * min_latency anything else: specified limit
3161 * Similar for max_latency */
3163 if (min_latency < ABSOLUTE_MIN_LATENCY)
3164 min_latency = ABSOLUTE_MIN_LATENCY;
3166 if (max_latency <= 0 ||
3167 max_latency > ABSOLUTE_MAX_LATENCY)
3168 max_latency = ABSOLUTE_MAX_LATENCY;
3170 pa_assert(min_latency <= max_latency);
3172 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3173 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3174 max_latency == ABSOLUTE_MAX_LATENCY) ||
3175 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3177 if (PA_SINK_IS_LINKED(s->state)) {
3183 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3185 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3188 /* Called from main thread */
3189 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3190 pa_sink_assert_ref(s);
3191 pa_assert_ctl_context();
3192 pa_assert(min_latency);
3193 pa_assert(max_latency);
3195 if (PA_SINK_IS_LINKED(s->state)) {
3196 pa_usec_t r[2] = { 0, 0 };
3198 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3200 *min_latency = r[0];
3201 *max_latency = r[1];
3203 *min_latency = s->thread_info.min_latency;
3204 *max_latency = s->thread_info.max_latency;
3208 /* Called from IO thread */
3209 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3210 pa_sink_assert_ref(s);
3211 pa_sink_assert_io_context(s);
3213 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3214 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3215 pa_assert(min_latency <= max_latency);
3217 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3218 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3219 max_latency == ABSOLUTE_MAX_LATENCY) ||
3220 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3222 if (s->thread_info.min_latency == min_latency &&
3223 s->thread_info.max_latency == max_latency)
3226 s->thread_info.min_latency = min_latency;
3227 s->thread_info.max_latency = max_latency;
3229 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3233 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3234 if (i->update_sink_latency_range)
3235 i->update_sink_latency_range(i);
3238 pa_sink_invalidate_requested_latency(s, false);
3240 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3243 /* Called from main thread */
3244 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3245 pa_sink_assert_ref(s);
3246 pa_assert_ctl_context();
3248 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3249 pa_assert(latency == 0);
3253 if (latency < ABSOLUTE_MIN_LATENCY)
3254 latency = ABSOLUTE_MIN_LATENCY;
3256 if (latency > ABSOLUTE_MAX_LATENCY)
3257 latency = ABSOLUTE_MAX_LATENCY;
3259 if (PA_SINK_IS_LINKED(s->state))
3260 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3262 s->thread_info.fixed_latency = latency;
3264 pa_source_set_fixed_latency(s->monitor_source, latency);
3267 /* Called from main thread */
3268 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3271 pa_sink_assert_ref(s);
3272 pa_assert_ctl_context();
3274 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3277 if (PA_SINK_IS_LINKED(s->state))
3278 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3280 latency = s->thread_info.fixed_latency;
3285 /* Called from IO thread */
3286 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3287 pa_sink_assert_ref(s);
3288 pa_sink_assert_io_context(s);
3290 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3291 pa_assert(latency == 0);
3292 s->thread_info.fixed_latency = 0;
3294 if (s->monitor_source)
3295 pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
3300 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3301 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3303 if (s->thread_info.fixed_latency == latency)
3306 s->thread_info.fixed_latency = latency;
3308 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3312 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3313 if (i->update_sink_fixed_latency)
3314 i->update_sink_fixed_latency(i);
3317 pa_sink_invalidate_requested_latency(s, false);
3319 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3322 /* Called from main context */
3323 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3324 pa_sink_assert_ref(s);
3326 s->port_latency_offset = offset;
3328 if (PA_SINK_IS_LINKED(s->state))
3329 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3331 s->thread_info.port_latency_offset = offset;
3333 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3336 /* Called from main context */
3337 size_t pa_sink_get_max_rewind(pa_sink *s) {
3339 pa_assert_ctl_context();
3340 pa_sink_assert_ref(s);
3342 if (!PA_SINK_IS_LINKED(s->state))
3343 return s->thread_info.max_rewind;
3345 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3350 /* Called from main context */
3351 size_t pa_sink_get_max_request(pa_sink *s) {
3353 pa_sink_assert_ref(s);
3354 pa_assert_ctl_context();
3356 if (!PA_SINK_IS_LINKED(s->state))
3357 return s->thread_info.max_request;
3359 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3364 /* Called from main context */
3365 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3366 pa_device_port *port;
3369 pa_sink_assert_ref(s);
3370 pa_assert_ctl_context();
3373 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3374 return -PA_ERR_NOTIMPLEMENTED;
3378 return -PA_ERR_NOENTITY;
3380 if (!(port = pa_hashmap_get(s->ports, name)))
3381 return -PA_ERR_NOENTITY;
3383 if (s->active_port == port) {
3384 s->save_port = s->save_port || save;
3388 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
3389 struct sink_message_set_port msg = { .port = port, .ret = 0 };
3390 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
3394 ret = s->set_port(s, port);
3397 return -PA_ERR_NOENTITY;
3399 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3401 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3403 s->active_port = port;
3404 s->save_port = save;
3406 pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3408 /* The active port affects the default sink selection. */
3409 pa_core_update_default_sink(s->core);
3411 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
3416 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3417 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3421 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3424 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3426 if (pa_streq(ff, "microphone"))
3427 t = "audio-input-microphone";
3428 else if (pa_streq(ff, "webcam"))
3430 else if (pa_streq(ff, "computer"))
3432 else if (pa_streq(ff, "handset"))
3434 else if (pa_streq(ff, "portable"))
3435 t = "multimedia-player";
3436 else if (pa_streq(ff, "tv"))
3437 t = "video-display";
3440 * The following icons are not part of the icon naming spec,
3441 * because Rodney Dawes sucks as the maintainer of that spec.
3443 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3445 else if (pa_streq(ff, "headset"))
3446 t = "audio-headset";
3447 else if (pa_streq(ff, "headphone"))
3448 t = "audio-headphones";
3449 else if (pa_streq(ff, "speaker"))
3450 t = "audio-speakers";
3451 else if (pa_streq(ff, "hands-free"))
3452 t = "audio-handsfree";
3456 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3457 if (pa_streq(c, "modem"))
3464 t = "audio-input-microphone";
3467 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3468 if (strstr(profile, "analog"))
3470 else if (strstr(profile, "iec958"))
3472 else if (strstr(profile, "hdmi"))
3476 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3478 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3483 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3484 const char *s, *d = NULL, *k;
3487 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3491 if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3495 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3496 if (pa_streq(s, "internal"))
3497 d = _("Built-in Audio");
3500 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3501 if (pa_streq(s, "modem"))
3505 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3510 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3513 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3515 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3520 bool pa_device_init_intended_roles(pa_proplist *p) {
3524 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3527 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3528 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3529 || pa_streq(s, "headset")) {
3530 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3537 unsigned pa_device_init_priority(pa_proplist *p) {
3539 unsigned priority = 0;
3543 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3545 if (pa_streq(s, "sound"))
3547 else if (!pa_streq(s, "modem"))
3551 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3553 if (pa_streq(s, "headphone"))
3555 else if (pa_streq(s, "hifi"))
3557 else if (pa_streq(s, "speaker"))
3559 else if (pa_streq(s, "portable"))
3563 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3565 if (pa_streq(s, "bluetooth"))
3567 else if (pa_streq(s, "usb"))
3569 else if (pa_streq(s, "pci"))
3573 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3575 if (pa_startswith(s, "analog-"))
3577 else if (pa_startswith(s, "iec958-"))
3584 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3586 /* Called from the IO thread. */
3587 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3588 pa_sink_volume_change *c;
3589 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3590 c = pa_xnew(pa_sink_volume_change, 1);
3592 PA_LLIST_INIT(pa_sink_volume_change, c);
3594 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3598 /* Called from the IO thread. */
3599 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3601 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3605 /* Called from the IO thread. */
3606 void pa_sink_volume_change_push(pa_sink *s) {
3607 pa_sink_volume_change *c = NULL;
3608 pa_sink_volume_change *nc = NULL;
3609 pa_sink_volume_change *pc = NULL;
3610 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3612 const char *direction = NULL;
3615 nc = pa_sink_volume_change_new(s);
3617 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3618 * Adding one more volume for HW would get us rid of this, but I am trying
3619 * to survive with the ones we already have. */
3620 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
3622 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3623 pa_log_debug("Volume not changing");
3624 pa_sink_volume_change_free(nc);
3628 nc->at = pa_sink_get_latency_within_thread(s, false);
3629 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3631 if (s->thread_info.volume_changes_tail) {
3632 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3633 /* If volume is going up let's do it a bit late. If it is going
3634 * down let's do it a bit early. */
3635 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3636 if (nc->at + safety_margin > c->at) {
3637 nc->at += safety_margin;
3642 else if (nc->at - safety_margin > c->at) {
3643 nc->at -= safety_margin;
3651 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3652 nc->at += safety_margin;
3655 nc->at -= safety_margin;
3658 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3661 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3664 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3666 /* We can ignore volume events that came earlier but should happen later than this. */
3667 PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
3668 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3669 pa_sink_volume_change_free(c);
3672 s->thread_info.volume_changes_tail = nc;
3675 /* Called from the IO thread. */
3676 static void pa_sink_volume_change_flush(pa_sink *s) {
3677 pa_sink_volume_change *c = s->thread_info.volume_changes;
3679 s->thread_info.volume_changes = NULL;
3680 s->thread_info.volume_changes_tail = NULL;
3682 pa_sink_volume_change *next = c->next;
3683 pa_sink_volume_change_free(c);
3688 /* Called from the IO thread. */
3689 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3695 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3701 pa_assert(s->write_volume);
3703 now = pa_rtclock_now();
3705 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3706 pa_sink_volume_change *c = s->thread_info.volume_changes;
3707 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3708 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3709 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3711 s->thread_info.current_hw_volume = c->hw_volume;
3712 pa_sink_volume_change_free(c);
3718 if (s->thread_info.volume_changes) {
3720 *usec_to_next = s->thread_info.volume_changes->at - now;
3721 if (pa_log_ratelimit(PA_LOG_DEBUG))
3722 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3727 s->thread_info.volume_changes_tail = NULL;
3732 /* Called from the IO thread. */
3733 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3734 /* All the queued volume events later than current latency are shifted to happen earlier. */
3735 pa_sink_volume_change *c;
3736 pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3737 pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3738 pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
3740 pa_log_debug("latency = %lld", (long long) limit);
3741 limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3743 PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3744 pa_usec_t modified_limit = limit;
3745 if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3746 modified_limit -= s->thread_info.volume_change_safety_margin;
3748 modified_limit += s->thread_info.volume_change_safety_margin;
3749 if (c->at > modified_limit) {
3751 if (c->at < modified_limit)
3752 c->at = modified_limit;
3754 prev_vol = pa_cvolume_avg(&c->hw_volume);
3756 pa_sink_volume_change_apply(s, NULL);
3759 /* Called from the main thread */
3760 /* Gets the list of formats supported by the sink. The members and idxset must
3761 * be freed by the caller. */
3762 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3767 if (s->get_formats) {
3768 /* Sink supports format query, all is good */
3769 ret = s->get_formats(s);
3771 /* Sink doesn't support format query, so assume it does PCM */
3772 pa_format_info *f = pa_format_info_new();
3773 f->encoding = PA_ENCODING_PCM;
3775 ret = pa_idxset_new(NULL, NULL);
3776 pa_idxset_put(ret, f, NULL);
3782 /* Called from the main thread */
3783 /* Allows an external source to set what formats a sink supports if the sink
3784 * permits this. The function makes a copy of the formats on success. */
3785 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3790 /* Sink supports setting formats -- let's give it a shot */
3791 return s->set_formats(s, formats);
3793 /* Sink doesn't support setting this -- bail out */
3797 /* Called from the main thread */
3798 /* Checks if the sink can accept this format */
3799 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3800 pa_idxset *formats = NULL;
3806 formats = pa_sink_get_formats(s);
3809 pa_format_info *finfo_device;
3812 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3813 if (pa_format_info_is_compatible(finfo_device, f)) {
3819 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3825 /* Called from the main thread */
3826 /* Calculates the intersection between formats supported by the sink and
3827 * in_formats, and returns these, in the order of the sink's formats. */
3828 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3829 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3830 pa_format_info *f_sink, *f_in;
3835 if (!in_formats || pa_idxset_isempty(in_formats))
3838 sink_formats = pa_sink_get_formats(s);
3840 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3841 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3842 if (pa_format_info_is_compatible(f_sink, f_in))
3843 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3849 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3854 /* Called from the main thread. */
3855 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
3856 pa_cvolume old_volume;
3857 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3858 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3863 old_volume = s->reference_volume;
3865 if (pa_cvolume_equal(volume, &old_volume))
3868 s->reference_volume = *volume;
3869 pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
3870 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
3871 s->flags & PA_SINK_DECIBEL_VOLUME),
3872 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
3873 s->flags & PA_SINK_DECIBEL_VOLUME));
3875 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3876 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);