2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
29 #include <pulse/introspect.h>
30 #include <pulse/format.h>
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/sink-input.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-util.h>
42 #include <pulsecore/sample-util.h>
43 #include <pulsecore/mix.h>
44 #include <pulsecore/core-subscribe.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/play-memblockq.h>
48 #include <pulsecore/flist.h>
/* Tunables and hard limits for sink mixing and latency handling.
 * NOTE(review): this is a fragmentary view of the file; surrounding
 * original lines are elided. */
52 #define MAX_MIX_CHANNELS 32
/* Mix buffer is one memory page; pa_page_size() is evaluated at use. */
53 #define MIX_BUFFER_LENGTH (pa_page_size())
/* Latency bounds in microseconds (PA_USEC units). */
54 #define ABSOLUTE_MIN_LATENCY (500)
55 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
/* Used when the sink does not implement dynamic latency. */
56 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* pa_sink is a public message object (refcounted, can receive async messages). */
58 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
/* Queued hardware-volume change entry for deferred-volume sinks.
 * NOTE(review): remaining struct fields are elided in this fragment. */
60 struct pa_sink_volume_change {
64     PA_LLIST_FIELDS(pa_sink_volume_change);
/* Payload for PA_SINK_MESSAGE_SET_PORT when volume is deferred.
 * NOTE(review): fields elided in this fragment. */
67 struct sink_message_set_port {
/* Forward declarations for functions defined later in this file. */
72 static void sink_free(pa_object *s);
74 static void pa_sink_volume_change_push(pa_sink *s);
75 static void pa_sink_volume_change_flush(pa_sink *s);
76 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/* Initializes a pa_sink_new_data structure for use with pa_sink_new():
 * allocates a fresh proplist and an empty port hashmap (port values are
 * unreffed on removal). Returns the same pointer for chaining.
 * NOTE(review): zeroing of *data and the return statement are elided in
 * this fragment. */
78 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
82     data->proplist = pa_proplist_new();
83     data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
/* Sets the sink name on the new-data struct; stores a private copy.
 * NOTE(review): freeing of a previously set name is elided in this
 * fragment — confirm against the full source. */
88 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
92     data->name = pa_xstrdup(name);
/* Sets the sample spec; passing NULL clears the "is set" flag instead. */
95 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
98     if ((data->sample_spec_is_set = !!spec))
99         data->sample_spec = *spec;
/* Sets the channel map; passing NULL clears the "is set" flag instead. */
102 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
105     if ((data->channel_map_is_set = !!map))
106         data->channel_map = *map;
/* Sets the alternate sample rate (used for automatic rate switching)
 * and marks it as explicitly provided. */
109 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
112     data->alternate_sample_rate_is_set = true;
113     data->alternate_sample_rate = alternate_sample_rate;
/* Sets the initial volume; passing NULL clears the "is set" flag instead. */
116 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
119     if ((data->volume_is_set = !!volume))
120         data->volume = *volume;
/* Sets the initial mute state and marks it as explicitly provided.
 * NOTE(review): the assignment of data->muted itself is elided in this
 * fragment. */
123 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
126     data->muted_is_set = true;
/* Sets the requested active port by name, replacing any previous value.
 * pa_xfree(NULL) and pa_xstrdup(NULL) are safe no-ops. */
130 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
133     pa_xfree(data->active_port);
134     data->active_port = pa_xstrdup(port);
/* Releases all resources owned by a pa_sink_new_data after pa_sink_new()
 * has consumed (or failed to consume) it. The ports hashmap unrefs its
 * values via the free callback installed in _init().
 * NOTE(review): NULL guards around the frees are elided in this fragment. */
137 void pa_sink_new_data_done(pa_sink_new_data *data) {
140     pa_proplist_free(data->proplist);
143     pa_hashmap_free(data->ports);
145     pa_xfree(data->name);
146     pa_xfree(data->active_port);
149 /* Called from main context */
/* Clears every implementor-provided callback on the sink so that a
 * freshly created sink starts with no hardware volume/mute/latency
 * hooks; implementors install the ones they support afterwards.
 * NOTE(review): some callback resets (e.g. set_mute, set_port) are
 * elided in this fragment. */
150 static void reset_callbacks(pa_sink *s) {
154     s->get_volume = NULL;
155     s->set_volume = NULL;
156     s->write_volume = NULL;
159     s->request_rewind = NULL;
160     s->update_requested_latency = NULL;
162     s->get_formats = NULL;
163     s->set_formats = NULL;
164     s->reconfigure = NULL;
167 /* Called from main context */
/* Creates and initializes a new sink from the data filled in by the
 * caller (typically a module). Overall flow, as visible in this
 * fragment:
 *   1. register the name, fire SINK_NEW hook (modules may veto/modify);
 *   2. validate driver/name UTF-8, sample spec, channel map, volume;
 *   3. fill in defaults (volume, mute, card proplist, best port);
 *   4. fire SINK_FIXATE hook, then populate the pa_sink fields;
 *   5. create the accompanying ".monitor" source.
 * Returns NULL on validation/hook failure. Ownership of data->ports is
 * stolen into s->ports (see comment near line 295).
 * NOTE(review): many statements (error paths, returns, assertions) are
 * elided in this fragment — the FIXME at line 201 about leaking s on
 * failure is from the original source. */
168 pa_sink* pa_sink_new(
170         pa_sink_new_data *data,
171         pa_sink_flags_t flags) {
175     char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
176     pa_source_new_data source_data;
182     pa_assert(data->name);
183     pa_assert_ctl_context();
185     s = pa_msgobject_new(pa_sink);
/* Name registration may rewrite the name (e.g. to make it unique). */
187     if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
188         pa_log_debug("Failed to register name %s.", data->name);
193     pa_sink_new_data_set_name(data, name);
/* Modules hooked on SINK_NEW may reject the sink. */
195     if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
197         pa_namereg_unregister(core, name);
201     /* FIXME, need to free s here on failure */
203     pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
204     pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
206     pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
/* Derive a default channel map from the channel count if none was given. */
208     if (!data->channel_map_is_set)
209         pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
211     pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
212     pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
214     /* FIXME: There should probably be a general function for checking whether
215      * the sink volume is allowed to be set, like there is for sink inputs. */
216     pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* Default volume: NORM on all channels, not saved. */
218     if (!data->volume_is_set) {
219         pa_cvolume_reset(&data->volume, data->sample_spec.channels);
220         data->save_volume = false;
223     pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
224     pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
226     if (!data->muted_is_set)
/* Inherit card properties and fill in standard device properties. */
230         pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
232     pa_device_init_description(data->proplist, data->card);
233     pa_device_init_icon(data->proplist, true);
234     pa_device_init_intended_roles(data->proplist);
/* If no port was requested, pick the best-priority one. */
236     if (!data->active_port) {
237         pa_device_port *p = pa_device_port_find_best(data->ports);
239             pa_sink_new_data_set_port(data, p->name);
/* Modules hooked on SINK_FIXATE get a last chance to veto/adjust. */
242     if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
244         pa_namereg_unregister(core, name);
248     s->parent.parent.free = sink_free;
249     s->parent.process_msg = pa_sink_process_msg;
252     s->state = PA_SINK_INIT;
255     s->suspend_cause = data->suspend_cause;
256     pa_sink_set_mixer_dirty(s, false);
257     s->name = pa_xstrdup(name);
258     s->proplist = pa_proplist_copy(data->proplist);
259     s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
260     s->module = data->module;
261     s->card = data->card;
263     s->priority = pa_device_init_priority(s->proplist);
265     s->sample_spec = data->sample_spec;
266     s->channel_map = data->channel_map;
267     s->default_sample_rate = s->sample_spec.rate;
269     if (data->alternate_sample_rate_is_set)
270         s->alternate_sample_rate = data->alternate_sample_rate;
272         s->alternate_sample_rate = s->core->alternate_sample_rate;
/* An alternate rate equal to the default is useless — disable it. */
274     if (s->sample_spec.rate == s->alternate_sample_rate) {
275         pa_log_warn("Default and alternate sample rates are the same.");
276         s->alternate_sample_rate = 0;
279     s->inputs = pa_idxset_new(NULL, NULL);
281     s->input_to_master = NULL;
283     s->reference_volume = s->real_volume = data->volume;
284     pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
285     s->base_volume = PA_VOLUME_NORM;
286     s->n_volume_steps = PA_VOLUME_NORM+1;
287     s->muted = data->muted;
288     s->refresh_volume = s->refresh_muted = false;
295     /* As a minor optimization we just steal the list instead of */
/* ... copying it — data->ports must not be freed by the caller after this. */
297     s->ports = data->ports;
300     s->active_port = NULL;
301     s->save_port = false;
303     if (data->active_port)
304         if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
305             s->save_port = data->save_port;
307     /* Hopefully the active port has already been assigned in the previous call */
308     /*    to pa_device_port_find_best, but better safe than sorry */
310         s->active_port = pa_device_port_find_best(s->ports);
313         s->port_latency_offset = s->active_port->latency_offset;
315         s->port_latency_offset = 0;
317     s->save_volume = data->save_volume;
318     s->save_muted = data->save_muted;
/* Pre-fetch a silence memchunk matching this sink's spec.
 * NOTE(review): remaining arguments of this call are elided here. */
320     pa_silence_memchunk_get(
321             &core->silence_cache,
/* Initialize the IO-thread-side mirror of the main-thread state. */
327     s->thread_info.rtpoll = NULL;
328     s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
329                                                 (pa_free_cb_t) pa_sink_input_unref);
330     s->thread_info.soft_volume = s->soft_volume;
331     s->thread_info.soft_muted = s->muted;
332     s->thread_info.state = s->state;
333     s->thread_info.rewind_nbytes = 0;
334     s->thread_info.rewind_requested = false;
335     s->thread_info.max_rewind = 0;
336     s->thread_info.max_request = 0;
337     s->thread_info.requested_latency_valid = false;
338     s->thread_info.requested_latency = 0;
339     s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
340     s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
341     s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
343     PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
344     s->thread_info.volume_changes_tail = NULL;
/* hw volume = real / soft (cvolume division). */
345     pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
346     s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
347     s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
348     s->thread_info.port_latency_offset = s->port_latency_offset;
350     /* FIXME: This should probably be moved to pa_sink_put() */
351     pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
354         pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
356     pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
357     pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
360                 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
361                 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Create the monitor source that mirrors this sink's output. */
365     pa_source_new_data_init(&source_data);
366     pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
367     pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
368     pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
369     source_data.name = pa_sprintf_malloc("%s.monitor", name);
370     source_data.driver = data->driver;
371     source_data.module = data->module;
372     source_data.card = data->card;
374     dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
375     pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
376     pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
/* The monitor inherits only the latency-related flags from the sink. */
378     s->monitor_source = pa_source_new(core, &source_data,
379                                       ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
380                                       ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
382     pa_source_new_data_done(&source_data);
384     if (!s->monitor_source) {
390     s->monitor_source->monitor_of = s;
392     pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
393     pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
394     pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
399 /* Called from main context */
/* Transitions the sink to a new state. Notifies the implementor via
 * s->set_state() (if set), synchronously forwards the state to the IO
 * thread via PA_SINK_MESSAGE_SET_STATE, fires STATE_CHANGED hooks and a
 * subscription event, and handles suspend/resume side effects on sink
 * inputs and the monitor source. Returns 0 on success, negative on
 * failure (implementor state is rolled back on IO-thread failure).
 * NOTE(review): several lines (return statements, the set_state NULL
 * check, loop braces) are elided in this fragment. */
400 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
403     pa_sink_state_t original_state;
406     pa_assert_ctl_context();
408     if (s->state == state)
411     original_state = s->state;
/* suspend_change = entering or leaving SUSPENDED from/to an opened state. */
414         (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
415         (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
418         if ((ret = s->set_state(s, state)) < 0)
422         if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
/* Roll back the implementor's state if the IO thread refused. */
425                 s->set_state(s, original_state);
430     pa_log_debug("%s: state: %s -> %s", s->name, pa_sink_state_to_string(s->state), pa_sink_state_to_string(state));
433     if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
434         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
435         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
438     if (suspend_change) {
442         /* We're suspending or resuming, tell everyone about it */
444         PA_IDXSET_FOREACH(i, s->inputs, idx)
445             if (s->state == PA_SINK_SUSPENDED &&
446                 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
447                 pa_sink_input_kill(i);
/* NOTE(review): the "else if (i->suspend)" guard is elided here. */
449                 i->suspend(i, state == PA_SINK_SUSPENDED);
451         if (s->monitor_source)
452             pa_source_sync_suspend(s->monitor_source);
/* Installs the implementor's "query hardware volume" callback.
 * NOTE(review): body elided in this fragment. */
458 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Installs (or clears, cb == NULL) the implementor's "apply hardware
 * volume" callback and updates PA_SINK_HW_VOLUME_CTRL accordingly.
 * Decibel volume support is re-evaluated (the implementor sets it
 * explicitly for HW-controlled sinks). A change event is posted if the
 * flags changed after INIT.
 * NOTE(review): the actual s->set_volume assignment and the if/else
 * around the flag updates are elided in this fragment. */
464 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
465     pa_sink_flags_t flags;
/* write_volume (deferred volume) requires set_volume to stay installed. */
468     pa_assert(!s->write_volume || cb);
472     /* Save the current flags so we can tell if they've changed */
476     /* The sink implementor is responsible for setting decibel volume support */
477         s->flags |= PA_SINK_HW_VOLUME_CTRL;
479         s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
480         /* See note below in pa_sink_put() about volume sharing and decibel volumes */
481         pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
484     /* If the flags have changed after init, let any clients know via a change event */
485     if (s->state != PA_SINK_INIT && flags != s->flags)
486         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Installs (or clears) the deferred-volume write callback and toggles
 * PA_SINK_DEFERRED_VOLUME to match. Requires set_volume to be installed
 * first when enabling. Posts a change event if flags changed after INIT.
 * NOTE(review): the if/else around the flag toggle is elided in this
 * fragment. */
489 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
490     pa_sink_flags_t flags;
493     pa_assert(!cb || s->set_volume);
495     s->write_volume = cb;
497     /* Save the current flags so we can tell if they've changed */
501         s->flags |= PA_SINK_DEFERRED_VOLUME;
503         s->flags &= ~PA_SINK_DEFERRED_VOLUME;
505     /* If the flags have changed after init, let any clients know via a change event */
506     if (s->state != PA_SINK_INIT && flags != s->flags)
507         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Installs the implementor's "query hardware mute" callback.
 * NOTE(review): body elided in this fragment. */
510 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
/* Installs (or clears) the implementor's "apply hardware mute" callback
 * and toggles PA_SINK_HW_MUTE_CTRL to match. Posts a change event if
 * flags changed after INIT.
 * NOTE(review): the s->set_mute assignment and the if/else around the
 * flag toggle are elided in this fragment. */
516 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
517     pa_sink_flags_t flags;
523     /* Save the current flags so we can tell if they've changed */
527         s->flags |= PA_SINK_HW_MUTE_CTRL;
529         s->flags &= ~PA_SINK_HW_MUTE_CTRL;
531     /* If the flags have changed after init, let any clients know via a change event */
532     if (s->state != PA_SINK_INIT && flags != s->flags)
533         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enables/disables PA_SINK_FLAT_VOLUME on the sink. The request is
 * always ANDed with the global core->flat_volumes user preference, so
 * flat volume cannot be forced on when the user disabled it. Posts a
 * change event if flags changed after INIT.
 * NOTE(review): the if/else around the flag toggle is elided in this
 * fragment. */
536 static void enable_flat_volume(pa_sink *s, bool enable) {
537     pa_sink_flags_t flags;
541     /* Always follow the overall user preference here */
542     enable = enable && s->core->flat_volumes;
544     /* Save the current flags so we can tell if they've changed */
548         s->flags |= PA_SINK_FLAT_VOLUME;
550         s->flags &= ~PA_SINK_FLAT_VOLUME;
552     /* If the flags have changed after init, let any clients know via a change event */
553     if (s->state != PA_SINK_INIT && flags != s->flags)
554         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enables/disables PA_SINK_DECIBEL_VOLUME. Flat volume follows decibel
 * volume here: it is enabled together with dB support and disabled when
 * dB support goes away. Posts a change event if flags changed after
 * INIT.
 * NOTE(review): the if/else around the flag toggle is elided in this
 * fragment. */
557 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
558     pa_sink_flags_t flags;
562     /* Save the current flags so we can tell if they've changed */
566         s->flags |= PA_SINK_DECIBEL_VOLUME;
567         enable_flat_volume(s, true);
569         s->flags &= ~PA_SINK_DECIBEL_VOLUME;
570         enable_flat_volume(s, false);
573     /* If the flags have changed after init, let any clients know via a change event */
574     if (s->state != PA_SINK_INIT && flags != s->flags)
575         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
578 /* Called from main context */
/* Completes sink initialization after the implementor has configured it:
 * validates flag/callback consistency, finalizes volume handling
 * (decibel/flat/shared volume), syncs the IO-thread volume mirror,
 * moves the sink to SUSPENDED or IDLE, puts the monitor source, and
 * announces the new sink (subscription event, SINK_PUT hook, default
 * sink update). Must be called exactly once, while state == INIT. */
579 void pa_sink_put(pa_sink* s) {
580     pa_sink_assert_ref(s);
581     pa_assert_ctl_context();
583     pa_assert(s->state == PA_SINK_INIT);
584     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
586     /* The following fields must be initialized properly when calling _put() */
587     pa_assert(s->asyncmsgq);
588     pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
590     /* Generally, flags should be initialized via pa_sink_new(). As a */
591     /* special exception we allow some volume related flags to be set */
592     /* between _new() and _put() by the callback setter functions above. */
594     /* Thus we implement a couple safeguards here which ensure the above */
595     /* setters were used (or at least the implementor made manual changes */
596     /* in a compatible way). */
598     /* Note: All of these flags set here can change over the life time */
600     pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
601     pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
602     pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
604     /* XXX: Currently decibel volume is disabled for all sinks that use volume */
605     /* sharing. When the master sink supports decibel volume, it would be good */
606     /* to have the flag also in the filter sink, but currently we don't do that */
607     /* so that the flags of the filter sink never change when it's moved from */
608     /* a master sink to another. One solution for this problem would be to */
609     /* remove user-visible volume altogether from filter sinks when volume */
610     /* sharing is used, but the current approach was easier to implement... */
611     /* We always support decibel volumes in software, otherwise we leave it to */
612     /* the sink implementor to set this flag as needed. */
614     /* Note: This flag can also change over the life time of the sink. */
615     if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
616         pa_sink_enable_decibel_volume(s, true);
/* Pure software volume: the soft volume carries the whole volume. */
617         s->soft_volume = s->reference_volume;
620     /* If the sink implementor support DB volumes by itself, we should always */
621     /* try and enable flat volumes too */
622     if ((s->flags & PA_SINK_DECIBEL_VOLUME))
623         enable_flat_volume(s, true);
/* Filter sinks with shared volume mirror their master's volumes,
 * remapped to this sink's channel map. */
625     if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
626         pa_sink *root_sink = pa_sink_get_master(s);
628         pa_assert(root_sink);
630         s->reference_volume = root_sink->reference_volume;
631         pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
633         s->real_volume = root_sink->real_volume;
634         pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
636         /* We assume that if the sink implementor changed the default */
637         /* volume he did so in real_volume, because that is the usual */
638         /* place where he is supposed to place his changes. */
639         s->reference_volume = s->real_volume;
/* Re-sync the IO-thread mirrors with the finalized values. */
641     s->thread_info.soft_volume = s->soft_volume;
642     s->thread_info.soft_muted = s->muted;
643     pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
/* Consistency checks between sink flags and monitor-source flags. */
645     pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
646               || (s->base_volume == PA_VOLUME_NORM
647                   && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
648     pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
649     pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
650     pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
651     pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
653     pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
654     pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
655     pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
/* Start SUSPENDED if a suspend cause is already set, IDLE otherwise. */
657     if (s->suspend_cause)
658         pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED) == 0);
660         pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
662     pa_source_put(s->monitor_source);
664     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
665     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
667     /* This function must be called after the PA_CORE_HOOK_SINK_PUT hook, */
668     /* because module-switch-on-connect needs to know the old default sink */
669     pa_core_update_default_sink(s->core);
672 /* Called from main context */
/* Detaches the sink from the core: fires UNLINK hooks, unregisters the
 * name, removes the sink from the core/card idxsets, updates the default
 * sink, kills all remaining inputs, moves to UNLINKED state, unlinks the
 * monitor source, and posts the REMOVE event. Safe to call more than
 * once (guarded by unlink_requested). Note this undoes registrations
 * made in pa_sink_new(), not just pa_sink_put().
 * NOTE(review): several return statements and conditionals are elided
 * in this fragment. */
673 void pa_sink_unlink(pa_sink* s) {
675     pa_sink_input *i, PA_UNUSED *j = NULL;
677     pa_sink_assert_ref(s);
678     pa_assert_ctl_context();
680     /* Please note that pa_sink_unlink() does more than simply */
681     /* reversing pa_sink_put(). It also undoes the registrations */
682     /* already done in pa_sink_new()! */
684     if (s->unlink_requested)
687     s->unlink_requested = true;
689     linked = PA_SINK_IS_LINKED(s->state);
692         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
694     if (s->state != PA_SINK_UNLINKED)
695         pa_namereg_unregister(s->core, s->name);
696     pa_idxset_remove_by_data(s->core->sinks, s, NULL);
698     pa_core_update_default_sink(s->core);
701         pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill every remaining input; killing removes it from s->inputs. */
703     while ((i = pa_idxset_first(s->inputs, NULL))) {
705         pa_sink_input_kill(i);
710         sink_set_state(s, PA_SINK_UNLINKED);
/* If not linked, set the state field directly (no IO thread to notify). */
712         s->state = PA_SINK_UNLINKED;
716     if (s->monitor_source)
717         pa_source_unlink(s->monitor_source);
720         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
721         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
725 /* Called from main context */
/* Destructor invoked when the sink's refcount reaches zero (installed
 * as parent.parent.free in pa_sink_new). Frees queued volume changes,
 * the monitor source reference, input containers, the cached silence
 * block, the proplist and ports.
 * NOTE(review): frees of name/driver and other fields are elided in
 * this fragment. */
726 static void sink_free(pa_object *o) {
727     pa_sink *s = PA_SINK(o);
730     pa_assert_ctl_context();
731     pa_assert(pa_sink_refcnt(s) == 0);
732     pa_assert(!PA_SINK_IS_LINKED(s->state));
734     pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
736     pa_sink_volume_change_flush(s);
738     if (s->monitor_source) {
739         pa_source_unref(s->monitor_source);
740         s->monitor_source = NULL;
743     pa_idxset_free(s->inputs, NULL);
744     pa_hashmap_free(s->thread_info.inputs);
746     if (s->silence.memblock)
747         pa_memblock_unref(s->silence.memblock);
753         pa_proplist_free(s->proplist);
756         pa_hashmap_free(s->ports);
761 /* Called from main context, and not while the IO thread is active, please */
/* Sets the async message queue used to talk to the IO thread, and
 * propagates it to the monitor source.
 * NOTE(review): the s->asyncmsgq assignment itself is elided in this
 * fragment. */
762 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
763     pa_sink_assert_ref(s);
764     pa_assert_ctl_context();
768     if (s->monitor_source)
769         pa_source_set_asyncmsgq(s->monitor_source, q);
772 /* Called from main context, and not while the IO thread is active, please */
/* Updates the sink flags covered by 'mask' to the bits given in 'value'.
 * Only LATENCY and DYNAMIC_LATENCY may be changed this way (asserted).
 * On an actual change: logs, posts a change event, fires FLAGS_CHANGED,
 * mirrors the change onto the monitor source, and recurses into any
 * filter sinks stacked on top of this one (via each input's
 * origin_sink). */
773 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
774     pa_sink_flags_t old_flags;
775     pa_sink_input *input;
778     pa_sink_assert_ref(s);
779     pa_assert_ctl_context();
781     /* For now, allow only a minimal set of flags to be changed. */
782     pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
784     old_flags = s->flags;
785     s->flags = (s->flags & ~mask) | (value & mask);
/* No-op update: nothing to announce. */
787     if (s->flags == old_flags)
790     if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
791         pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
793     if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
794         pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
795                      s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
797     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
798     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
/* Translate sink latency flags to the equivalent source flags. */
800     if (s->monitor_source)
801         pa_source_update_flags(s->monitor_source,
802                                ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
803                                ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
804                                ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
805                                ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
/* Propagate to filter sinks layered on top of this sink. */
807     PA_IDXSET_FOREACH(input, s->inputs, idx) {
808         if (input->origin_sink)
809             pa_sink_update_flags(input->origin_sink, mask, value);
813 /* Called from IO context, or before _put() from main context */
/* Sets the rtpoll object the IO thread uses, and propagates it to the
 * monitor source. */
814 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
815     pa_sink_assert_ref(s);
816     pa_sink_assert_io_context(s);
818     s->thread_info.rtpoll = p;
820     if (s->monitor_source)
821         pa_source_set_rtpoll(s->monitor_source, p);
824 /* Called from main context */
/* Re-evaluates whether the sink should be RUNNING (has users) or IDLE.
 * Does nothing while suspended. Returns the result of sink_set_state()
 * (0 on success or no-op).
 * NOTE(review): the early-return inside the SUSPENDED branch is elided
 * in this fragment. */
825 int pa_sink_update_status(pa_sink*s) {
826     pa_sink_assert_ref(s);
827     pa_assert_ctl_context();
828     pa_assert(PA_SINK_IS_LINKED(s->state));
830     if (s->state == PA_SINK_SUSPENDED)
833     return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
836 /* Called from any context - must be threadsafe */
/* Atomically marks the hardware mixer as dirty (settings need to be
 * re-applied once the mixer becomes accessible again; consumed in
 * pa_sink_suspend()). */
837 void pa_sink_set_mixer_dirty(pa_sink *s, bool is_dirty) {
838     pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
841 /* Called from main context */
/* Adds or removes a suspend cause bit on the sink (and its monitor
 * source). When the last PA_SUSPEND_SESSION cause clears and the mixer
 * was marked dirty, re-applies the active port (via a sync IO-thread
 * message for deferred-volume sinks, directly otherwise). Finally moves
 * the sink to SUSPENDED if any cause remains, else to RUNNING/IDLE.
 * Returns the sink_set_state() result, or 0 when the state already
 * matches.
 * NOTE(review): several braces/else branches and volume/mute refresh
 * calls are elided in this fragment. */
842 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
843     pa_suspend_cause_t old_cause;
844     char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
845     char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
847     pa_sink_assert_ref(s);
848     pa_assert_ctl_context();
849     pa_assert(PA_SINK_IS_LINKED(s->state));
850     pa_assert(cause != 0);
852     old_cause = s->suspend_cause;
855         s->suspend_cause |= cause;
856         s->monitor_source->suspend_cause |= cause;
858         s->suspend_cause &= ~cause;
859         s->monitor_source->suspend_cause &= ~cause;
862     if (s->suspend_cause != old_cause) {
863         pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(old_cause, old_cause_buf),
864                      pa_suspend_cause_to_string(s->suspend_cause, new_cause_buf));
867     if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
868         /* This might look racy but isn't: If somebody sets mixer_dirty exactly here, */
869            /* it'll be handled just fine. */
870         pa_sink_set_mixer_dirty(s, false);
871         pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
872         if (s->active_port && s->set_port) {
873             if (s->flags & PA_SINK_DEFERRED_VOLUME) {
874                 struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
875                 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
878                 s->set_port(s, s->active_port);
/* State already consistent with the suspend causes — nothing to do. */
888     if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
891     if (s->suspend_cause)
892         return sink_set_state(s, PA_SINK_SUSPENDED);
894         return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
897 /* Called from main context */
/* Begins moving every input away from this sink: each input that
 * successfully starts its move is referenced and pushed onto queue q
 * (created if NULL — creation elided in this fragment); inputs that
 * refuse are unreffed and stay. Returns the queue for a later
 * pa_sink_move_all_finish()/_fail().
 * NOTE(review): the queue push and the q-allocation lines are elided
 * in this fragment. */
898 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
899     pa_sink_input *i, *n;
902     pa_sink_assert_ref(s);
903     pa_assert_ctl_context();
904     pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the next input before acting on the current one, since a
 * successful start_move removes i from s->inputs. */
909     for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
910         n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
912         pa_sink_input_ref(i);
914         if (pa_sink_input_start_move(i) >= 0)
917             pa_sink_input_unref(i);
923 /* Called from main context */
/* Completes a move started with pa_sink_move_all_start(): every queued
 * input that is still linked is attached to sink s (falling back to
 * fail_move on error), then unreffed. The queue itself is freed. */
924 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
927     pa_sink_assert_ref(s);
928     pa_assert_ctl_context();
929     pa_assert(PA_SINK_IS_LINKED(s->state));
932     while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
933         if (PA_SINK_INPUT_IS_LINKED(i->state)) {
934             if (pa_sink_input_finish_move(i, s, save) < 0)
935                 pa_sink_input_fail_move(i);
/* Drop the reference taken in pa_sink_move_all_start(). */
938         pa_sink_input_unref(i);
941     pa_queue_free(q, NULL);
944 /* Called from main context */
/* Aborts a move started with pa_sink_move_all_start(): every queued
 * input gets fail_move (which typically kills it) and is unreffed;
 * the queue is freed. */
945 void pa_sink_move_all_fail(pa_queue *q) {
948     pa_assert_ctl_context();
951     while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
952         pa_sink_input_fail_move(i);
953         pa_sink_input_unref(i);
956     pa_queue_free(q, NULL);
959 /* Called from IO thread context */
/* Scans all inputs for underruns. 'left_to_play' is how many bytes of
 * audio remain queued for this sink. For filter-sink inputs the check
 * recurses into the origin sink, converting byte counts between the two
 * sample specs in both directions. 'result' tracks the longest underrun
 * found; the return value is left_to_play minus that, i.e. the bytes
 * that are safe to sleep on.
 * NOTE(review): initialization of 'result', some braces and the
 * result-assignment in the final else-if are elided in this fragment. */
960 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
965     pa_sink_assert_ref(s);
966     pa_sink_assert_io_context(s);
968     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
969         size_t uf = i->thread_info.underrun_for_sink;
971         /* Propagate down the filter tree */
972         if (i->origin_sink) {
973             size_t filter_result, left_to_play_origin;
975             /* The recursive call works in the origin sink domain ... */
976             left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
978             /* .. and returns the time to sleep before waking up. We need the */
979             /* underrun duration for comparisons, so we undo the subtraction on */
980             /* the return value... */
981             filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
983             /* ... and convert it back to the master sink domain */
984             filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
986             /* Remember the longest underrun so far */
987             if (filter_result > result)
988                 result = filter_result;
992             /* No underrun here, move on */
994         } else if (uf >= left_to_play) {
995             /* The sink has possibly consumed all the data the sink input provided */
996             pa_sink_input_process_underrun(i);
997         } else if (uf > result) {
998             /* Remember the longest underrun so far */
1004         pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
1005                 (long) result, (long) left_to_play - result);
1006     return left_to_play - result;
1009 /* Called from IO thread context */
/* Executes a rewind of 'nbytes' bytes: resets the rewind bookkeeping,
 * rewinds queued hardware volume changes (deferred-volume sinks),
 * forwards the rewind to every input, and finally to the monitor
 * source. Skipped entirely if no rewind was requested and nbytes is 0.
 * NOTE(review): the early-return and some surrounding lines are elided
 * in this fragment. */
1010 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1014     pa_sink_assert_ref(s);
1015     pa_sink_assert_io_context(s);
1016     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1018     /* If nobody requested this and this is actually no real rewind */
1019     /* then we can short cut this. Please note that this means that */
1020     /* not all rewind requests triggered upstream will always be */
1021     /* translated in actual requests! */
1022     if (!s->thread_info.rewind_requested && nbytes <= 0)
1025     s->thread_info.rewind_nbytes = 0;
1026     s->thread_info.rewind_requested = false;
1029         pa_log_debug("Processing rewind...");
1030         if (s->flags & PA_SINK_DEFERRED_VOLUME)
1031             pa_sink_volume_change_rewind(s, nbytes);
1034     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1035         pa_sink_input_assert_ref(i);
1036         pa_sink_input_process_rewind(i, nbytes);
1040         if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1041             pa_source_process_rewind(s->monitor_source, nbytes);
1045 /* Called from IO thread context */
/* Collects up to 'maxinfo' chunks from the sink's inputs into 'info'
 * for mixing. Peeks each input, tracks the shortest chunk length in
 * *length (so all mixed chunks are truncated to the same size), and
 * skips pure-silence chunks. Each recorded entry holds a reference to
 * its input in info->userdata. Returns the number of entries filled.
 * NOTE(review): the entry/counter advancement and return statement are
 * elided in this fragment. */
1046 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1050     size_t mixlength = *length;
1052     pa_sink_assert_ref(s);
1053     pa_sink_assert_io_context(s);
1056     while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1057         pa_sink_input_assert_ref(i);
1059         pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
1061         if (mixlength == 0 || info->chunk.length < mixlength)
1062             mixlength = info->chunk.length;
/* Silent chunks contribute nothing to the mix — drop them early. */
1064         if (pa_memblock_is_silence(info->chunk.memblock)) {
1065             pa_memblock_unref(info->chunk.memblock);
1069         info->userdata = pa_sink_input_ref(i);
1071         pa_assert(info->chunk.memblock);
1072         pa_assert(info->chunk.length > 0);
1080         *length = mixlength;
1085 /* Called from IO thread context */
/* After mixing, consumes result->length bytes from every input: drops
 * the read data, feeds per-input audio (volume-adjusted when the mix
 * entry carried a volume) to any direct source outputs, releases the
 * chunk/input references recorded in the pa_mix_info array, and posts
 * the mixed result to the monitor source.
 * NOTE(review): several braces, the unrolled index search details and
 * some reference-count bookkeeping lines are elided in this fragment. */
1086 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1090     unsigned n_unreffed = 0;
1092     pa_sink_assert_ref(s);
1093     pa_sink_assert_io_context(s);
1095     pa_assert(result->memblock);
1096     pa_assert(result->length > 0);
1098     /* We optimize for the case where the order of the inputs has not changed */
1100     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1102         pa_mix_info* m = NULL;
1104         pa_sink_input_assert_ref(i);
1106         /* Let's try to find the matching entry info the pa_mix_info array */
1107         for (j = 0; j < n; j ++) {
1109             if (info[p].userdata == i) {
1119         /* Drop read data */
1120         pa_sink_input_drop(i, result->length);
1122         if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
1124             if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1125                 void *ostate = NULL;
1126                 pa_source_output *o;
/* This input contributed audio: hand its chunk (with its mix volume
 * applied) to the direct outputs. */
1129                 if (m && m->chunk.memblock) {
1131                     pa_memblock_ref(c.memblock);
1132                     pa_assert(result->length <= c.length);
1133                     c.length = result->length;
1135                     pa_memchunk_make_writable(&c, 0);
1136                     pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
/* Otherwise (silent/absent entry) hand out silence of the same length. */
1139                     pa_memblock_ref(c.memblock);
1140                     pa_assert(result->length <= c.length);
1141                     c.length = result->length;
1144                 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1145                     pa_source_output_assert_ref(o);
1146                     pa_assert(o->direct_on_input == i);
1147                     pa_source_post_direct(s->monitor_source, o, &c);
1150                 pa_memblock_unref(c.memblock);
/* Release the chunk and the input reference held by the mix entry. */
1155             if (m->chunk.memblock) {
1156                 pa_memblock_unref(m->chunk.memblock);
1157                 pa_memchunk_reset(&m->chunk);
1160             pa_sink_input_unref(m->userdata);
1167     /* Now drop references to entries that are included in the */
1168     /* pa_mix_info array but don't exist anymore */
1170     if (n_unreffed < n) {
1171         for (; n > 0; info++, n--) {
1173                 pa_sink_input_unref(info->userdata);
1174             if (info->chunk.memblock)
1175                 pa_memblock_unref(info->chunk.memblock);
1179     if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1180         pa_source_post(s->monitor_source, result);
/* Render up to 'length' bytes of mixed audio from all inputs into a newly
 * referenced memchunk 'result'. Fast paths: suspended sink -> silence;
 * no inputs -> silence; exactly one input -> reuse its chunk (with soft
 * volume/mute applied); otherwise mix into a fresh memblock via pa_mix().
 * NOTE(review): excerpted listing — some interior lines are elided. */
1183 /* Called from IO thread context */
1184 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1185 pa_mix_info info[MAX_MIX_CHANNELS];
1187 size_t block_size_max;
1189 pa_sink_assert_ref(s);
1190 pa_sink_assert_io_context(s);
1191 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1192 pa_assert(pa_frame_aligned(length, &s->sample_spec));
/* Rendering must never race with a pending rewind. */
1195 pa_assert(!s->thread_info.rewind_requested);
1196 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: hand out (a slice of) the cached silence block. */
1198 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1199 result->memblock = pa_memblock_ref(s->silence.memblock);
1200 result->index = s->silence.index;
1201 result->length = PA_MIN(s->silence.length, length);
/* Clamp the request to the mix buffer and mempool block limits, keeping
 * frame alignment. */
1208 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1210 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1211 if (length > block_size_max)
1212 length = pa_frame_align(block_size_max, &s->sample_spec);
1214 pa_assert(length > 0);
1216 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: no input data, return silence. */
1220 *result = s->silence;
1221 pa_memblock_ref(result->memblock);
1223 if (result->length > length)
1224 result->length = length;
1226 } else if (n == 1) {
/* Single input: avoid mixing, reuse its chunk directly. */
1229 *result = info[0].chunk;
1230 pa_memblock_ref(result->memblock);
1232 if (result->length > length)
1233 result->length = length;
1235 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1237 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1238 pa_memblock_unref(result->memblock);
1239 pa_silence_memchunk_get(&s->core->silence_cache,
1244 } else if (!pa_cvolume_is_norm(&volume)) {
1245 pa_memchunk_make_writable(result, 0);
1246 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* General case: mix all inputs into a fresh memblock. */
1250 result->memblock = pa_memblock_new(s->core->mempool, length);
1252 ptr = pa_memblock_acquire(result->memblock);
1253 result->length = pa_mix(info, n,
1256 &s->thread_info.soft_volume,
1257 s->thread_info.soft_muted);
1258 pa_memblock_release(result->memblock);
/* Consume the rendered data from the inputs and release mix refs. */
1263 inputs_drop(s, info, n, result);
/* Render mixed audio from all inputs directly into the caller-provided
 * 'target' memchunk (writing at target->index, up to target->length bytes,
 * possibly shortening target->length). Mirrors pa_sink_render() but writes
 * in place instead of allocating a new memblock.
 * NOTE(review): excerpted listing — some interior lines are elided. */
1268 /* Called from IO thread context */
1269 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1270 pa_mix_info info[MAX_MIX_CHANNELS];
1272 size_t length, block_size_max;
1274 pa_sink_assert_ref(s);
1275 pa_sink_assert_io_context(s);
1276 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1278 pa_assert(target->memblock);
1279 pa_assert(target->length > 0);
1280 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
/* Rendering must never race with a pending rewind. */
1282 pa_assert(!s->thread_info.rewind_requested);
1283 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: fill the target with silence. */
1285 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1286 pa_silence_memchunk(target, &s->sample_spec);
/* Clamp the request to the mempool block limit, keeping frame alignment. */
1292 length = target->length;
1293 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1294 if (length > block_size_max)
1295 length = pa_frame_align(block_size_max, &s->sample_spec);
1297 pa_assert(length > 0);
1299 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: no input data, silence the target. */
1302 if (target->length > length)
1303 target->length = length;
1305 pa_silence_memchunk(target, &s->sample_spec);
1306 } else if (n == 1) {
/* Single input: copy its chunk (volume-adjusted) into the target. */
1309 if (target->length > length)
1310 target->length = length;
1312 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1314 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1315 pa_silence_memchunk(target, &s->sample_spec);
1319 vchunk = info[0].chunk;
1320 pa_memblock_ref(vchunk.memblock);
1322 if (vchunk.length > length)
1323 vchunk.length = length;
1325 if (!pa_cvolume_is_norm(&volume)) {
1326 pa_memchunk_make_writable(&vchunk, 0);
1327 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1330 pa_memchunk_memcpy(target, &vchunk);
1331 pa_memblock_unref(vchunk.memblock);
/* General case: mix all inputs straight into the target's memblock. */
1337 ptr = pa_memblock_acquire(target->memblock);
1339 target->length = pa_mix(info, n,
1340 (uint8_t*) ptr + target->index, length,
1342 &s->thread_info.soft_volume,
1343 s->thread_info.soft_muted);
1345 pa_memblock_release(target->memblock);
/* Consume the rendered data from the inputs and release mix refs. */
1348 inputs_drop(s, info, n, target);
/* Like pa_sink_render_into(), but guarantees the *entire* target chunk is
 * filled, looping over pa_sink_render_into() as needed (visible at the
 * chunk-based call below).
 * NOTE(review): excerpted listing — the loop scaffolding around the
 * pa_sink_render_into() call is elided. */
1353 /* Called from IO thread context */
1354 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1358 pa_sink_assert_ref(s);
1359 pa_sink_assert_io_context(s);
1360 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1362 pa_assert(target->memblock);
1363 pa_assert(target->length > 0);
1364 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
/* Rendering must never race with a pending rewind. */
1366 pa_assert(!s->thread_info.rewind_requested);
1367 pa_assert(s->thread_info.rewind_nbytes == 0);
1369 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1370 pa_silence_memchunk(target, &s->sample_spec);
1383 pa_sink_render_into(s, &chunk);
/* Render exactly 'length' bytes: first pa_sink_render() for as much as it
 * yields, then top up the remainder in place via pa_sink_render_into_full()
 * so the caller always gets a chunk of the requested length. */
1392 /* Called from IO thread context */
1393 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1394 pa_sink_assert_ref(s);
1395 pa_sink_assert_io_context(s);
1396 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1397 pa_assert(length > 0);
1398 pa_assert(pa_frame_aligned(length, &s->sample_spec));
/* Rendering must never race with a pending rewind. */
1401 pa_assert(!s->thread_info.rewind_requested);
1402 pa_assert(s->thread_info.rewind_nbytes == 0);
1406 pa_sink_render(s, length, result);
/* Short render: make the chunk writable at full size and fill the tail. */
1408 if (result->length < length) {
1411 pa_memchunk_make_writable(result, length);
1413 chunk.memblock = result->memblock;
1414 chunk.index = result->index + result->length;
1415 chunk.length = length - result->length;
1417 pa_sink_render_into_full(s, &chunk);
1419 result->length = length;
/* Try to reconfigure the sink's sample spec (currently only the rate) to
 * better match 'spec', honoring passthrough and avoid-resampling policy.
 * Bails out when the sink has no reconfigure() callback, is RUNNING, or the
 * monitor source is RUNNING. Suspends the sink around the actual change and
 * updates the monitor source and corked inputs afterwards.
 * NOTE(review): excerpted listing — return statements and some branch
 * delimiters are elided; return-value convention not fully visible here. */
1425 /* Called from main thread */
1426 int pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
1428 pa_sample_spec desired_spec;
1429 uint32_t default_rate = s->default_sample_rate;
1430 uint32_t alternate_rate = s->alternate_sample_rate;
1433 bool default_rate_is_usable = false;
1434 bool alternate_rate_is_usable = false;
1435 bool avoid_resampling = s->core->avoid_resampling;
1437 /* We currently only try to reconfigure the sample rate */
/* Nothing to do if the spec already matches. */
1439 if (pa_sample_spec_equal(spec, &s->sample_spec))
1442 if (!s->reconfigure)
1445 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1446 pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
/* A running sink (or monitor source) cannot be reconfigured live. */
1450 if (PA_SINK_IS_RUNNING(s->state)) {
1451 pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
1452 s->sample_spec.rate);
1456 if (s->monitor_source) {
1457 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1458 pa_log_info("Cannot update rate, monitor source is RUNNING");
1463 if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
1466 desired_spec = s->sample_spec;
/* Pick the target rate: passthrough and avoid-resampling take the stream's
 * rate; otherwise prefer default/alternate rates that divide cleanly
 * (11025/4000 Hz families) to minimize resampling effort. */
1469 /* We have to try to use the sink input rate */
1470 desired_spec.rate = spec->rate;
1472 } else if (avoid_resampling && (spec->rate >= default_rate || spec->rate >= alternate_rate)) {
1473 /* We just try to set the sink input's sample rate if it's not too low */
1474 desired_spec.rate = spec->rate;
1476 } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
1477 /* We can directly try to use this rate */
1478 desired_spec.rate = spec->rate;
1481 /* See if we can pick a rate that results in less resampling effort */
1482 if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
1483 default_rate_is_usable = true;
1484 if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
1485 default_rate_is_usable = true;
1486 if (alternate_rate && alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
1487 alternate_rate_is_usable = true;
1488 if (alternate_rate && alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
1489 alternate_rate_is_usable = true;
1491 if (alternate_rate_is_usable && !default_rate_is_usable)
1492 desired_spec.rate = alternate_rate;
1494 desired_spec.rate = default_rate;
1497 if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_sink_is_passthrough(s))
/* Don't disturb other users of the sink for a non-passthrough change. */
1500 if (!passthrough && pa_sink_used_by(s) > 0)
1503 pa_log_debug("Suspending sink %s due to changing format.", s->name);
1504 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1506 if (s->reconfigure(s, &desired_spec, passthrough) >= 0) {
1507 /* update monitor source as well */
1508 if (s->monitor_source && !passthrough)
1509 pa_source_reconfigure(s->monitor_source, &desired_spec, false);
1510 pa_log_info("Changed format successfully");
/* Corked inputs won't notice the change otherwise; update their rate. */
1512 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1513 if (i->state == PA_SINK_INPUT_CORKED)
1514 pa_sink_input_update_rate(i);
1520 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
/* Query the sink's current latency (in the sound card's time domain) by
 * sending a GET_LATENCY message to the IO thread, then apply the port
 * latency offset. Returns an unsigned pa_usec_t, so a negative offset is
 * only applied when it does not underflow. */
1525 /* Called from main thread */
1526 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1529 pa_sink_assert_ref(s);
1530 pa_assert_ctl_context();
1531 pa_assert(PA_SINK_IS_LINKED(s->state));
1533 /* The returned value is supposed to be in the time domain of the sound card! */
/* Suspended sinks and sinks without PA_SINK_LATENCY short-circuit
 * (elided return values not visible in this excerpt). */
1535 if (s->state == PA_SINK_SUSPENDED)
1538 if (!(s->flags & PA_SINK_LATENCY))
1541 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1543 /* the return value is unsigned, so check that the offset can be added to usec without
1545 if (-s->port_latency_offset <= usec)
1546 usec += s->port_latency_offset;
1550 return (pa_usec_t)usec;
/* IO-thread variant of pa_sink_get_latency(): calls process_msg() directly
 * instead of round-tripping through the async message queue. With
 * allow_negative=false the port offset may not drive the result below 0. */
1553 /* Called from IO thread */
1554 int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
1558 pa_sink_assert_ref(s);
1559 pa_sink_assert_io_context(s);
1560 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1562 /* The returned value is supposed to be in the time domain of the sound card! */
1564 if (s->thread_info.state == PA_SINK_SUSPENDED)
1567 if (!(s->flags & PA_SINK_LATENCY))
1570 o = PA_MSGOBJECT(s);
1572 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1574 o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1576 /* If allow_negative is false, the call should only return positive values, */
1577 usec += s->thread_info.port_latency_offset;
1578 if (!allow_negative && usec < 0)
/* Return whether flat volume is in effect for this sink. Volume-sharing
 * sinks never carry PA_SINK_FLAT_VOLUME themselves, so the check is made
 * on the root of the sharing chain. */
1584 /* Called from the main thread (and also from the IO thread while the main
1585 * thread is waiting).
1587 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1588 * set. Instead, flat volume mode is detected by checking whether the root sink
1589 * has the flag set. */
1590 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1591 pa_sink_assert_ref(s);
1593 s = pa_sink_get_master(s);
1596 return (s->flags & PA_SINK_FLAT_VOLUME);
/* Walk up the volume-sharing chain via input_to_master until the root sink
 * is reached. May return NULL-ish (elided) when a sharing sink has no
 * input_to_master — TODO confirm the elided branch. */
1601 /* Called from the main thread (and also from the IO thread while the main
1602 * thread is waiting). */
1603 pa_sink *pa_sink_get_master(pa_sink *s) {
1604 pa_sink_assert_ref(s);
1606 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1607 if (PA_UNLIKELY(!s->input_to_master))
1610 s = s->input_to_master->sink;
/* A sink is a "filter" sink iff it is layered on top of another sink via
 * an input_to_master sink input. */
1616 /* Called from main context */
1617 bool pa_sink_is_filter(pa_sink *s) {
1618 pa_sink_assert_ref(s);
1620 return (s->input_to_master != NULL);
/* Return whether the sink is currently in passthrough mode, i.e. its sole
 * connected input is a passthrough stream. */
1623 /* Called from main context */
1624 bool pa_sink_is_passthrough(pa_sink *s) {
1625 pa_sink_input *alt_i;
1628 pa_sink_assert_ref(s);
1630 /* one and only one PASSTHROUGH input can possibly be connected */
1631 if (pa_idxset_size(s->inputs) == 1) {
1632 alt_i = pa_idxset_first(s->inputs, &idx);
1634 if (pa_sink_input_is_passthrough(alt_i))
/* Put the PA core objects into passthrough mode: suspend the monitor
 * source, save the current volume, and force the volume to (at most) 0 dB
 * so the compressed stream is not scaled. */
1641 /* Called from main context */
1642 void pa_sink_enter_passthrough(pa_sink *s) {
1645 /* The sink implementation is reconfigured for passthrough in
1646 * pa_sink_reconfigure(). This function sets the PA core objects to
1647 * passthrough mode. */
1649 /* disable the monitor in passthrough mode */
1650 if (s->monitor_source) {
1651 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1652 pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
/* Remember the user's volume so pa_sink_leave_passthrough() can restore it. */
1655 /* set the volume to NORM */
1656 s->saved_volume = *pa_sink_get_volume(s, true);
1657 s->saved_save_volume = s->save_volume;
1659 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1660 pa_sink_set_volume(s, &volume, true, false);
1662 pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
/* Undo pa_sink_enter_passthrough(): resume the monitor source and restore
 * the volume (and its save flag) captured when passthrough was entered. */
1665 /* Called from main context */
1666 void pa_sink_leave_passthrough(pa_sink *s) {
1667 /* Unsuspend monitor */
1668 if (s->monitor_source) {
1669 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1670 pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1673 /* Restore sink volume to what it was before we entered passthrough mode */
1674 pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
/* Clear the saved state so stale values cannot leak into a later cycle. */
1676 pa_cvolume_init(&s->saved_volume);
1677 s->saved_save_volume = false;
/* Recompute a sink input's reference ratio
 * (i->reference_ratio = i->volume / i->sink->reference_volume), per
 * channel, skipping muted sink channels and channels whose ratio is
 * already consistent, then store it via
 * pa_sink_input_set_reference_ratio(). */
1681 /* Called from main context. */
1682 static void compute_reference_ratio(pa_sink_input *i) {
1684 pa_cvolume remapped;
1688 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1691 * Calculates the reference ratio from the sink's reference
1692 * volume. This basically calculates:
1694 * i->reference_ratio = i->volume / i->sink->reference_volume
/* Bring the sink volume into the input's channel map before dividing. */
1697 remapped = i->sink->reference_volume;
1698 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1700 ratio = i->reference_ratio;
1702 for (c = 0; c < i->sample_spec.channels; c++) {
1704 /* We don't update when the sink volume is 0 anyway */
1705 if (remapped.values[c] <= PA_VOLUME_MUTED)
1708 /* Don't update the reference ratio unless necessary */
1709 if (pa_sw_volume_multiply(
1711 remapped.values[c]) == i->volume.values[c])
1714 ratio.values[c] = pa_sw_volume_divide(
1715 i->volume.values[c],
1716 remapped.values[c]);
1719 pa_sink_input_set_reference_ratio(i, &ratio);
/* Recompute reference ratios for every input of this sink, recursing into
 * linked volume-sharing filter sinks stacked on those inputs. */
1722 /* Called from main context. Only called for the root sink in volume sharing
1723 * cases, except for internal recursive calls. */
1724 static void compute_reference_ratios(pa_sink *s) {
1728 pa_sink_assert_ref(s);
1729 pa_assert_ctl_context();
1730 pa_assert(PA_SINK_IS_LINKED(s->state));
1731 pa_assert(pa_sink_flat_volume_enabled(s));
1733 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1734 compute_reference_ratio(i);
/* Descend into filter sinks that share volume with this one. */
1736 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1737 && PA_SINK_IS_LINKED(i->origin_sink->state))
1738 compute_reference_ratios(i->origin_sink);
/* Recompute, for every input of this sink, the real ratio
 * (i->real_ratio = i->volume / s->real_volume) and the resulting soft
 * volume (real_ratio * volume_factor). Inputs feeding volume-sharing
 * filter sinks get a 0 dB real ratio and recurse instead. The thread_info
 * copy of soft_volume is intentionally left to the caller. */
1742 /* Called from main context. Only called for the root sink in volume sharing
1743 * cases, except for internal recursive calls. */
1744 static void compute_real_ratios(pa_sink *s) {
1748 pa_sink_assert_ref(s);
1749 pa_assert_ctl_context();
1750 pa_assert(PA_SINK_IS_LINKED(s->state));
1751 pa_assert(pa_sink_flat_volume_enabled(s));
1753 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1755 pa_cvolume remapped;
1757 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1758 /* The origin sink uses volume sharing, so this input's real ratio
1759 * is handled as a special case - the real ratio must be 0 dB, and
1760 * as a result i->soft_volume must equal i->volume_factor. */
1761 pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1762 i->soft_volume = i->volume_factor;
1764 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1765 compute_real_ratios(i->origin_sink);
1771 * This basically calculates:
1773 * i->real_ratio := i->volume / s->real_volume
1774 * i->soft_volume := i->real_ratio * i->volume_factor
/* Bring the sink's real volume into the input's channel map first. */
1777 remapped = s->real_volume;
1778 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1780 i->real_ratio.channels = i->sample_spec.channels;
1781 i->soft_volume.channels = i->sample_spec.channels;
1783 for (c = 0; c < i->sample_spec.channels; c++) {
1785 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1786 /* We leave i->real_ratio untouched */
1787 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1791 /* Don't lose accuracy unless necessary */
1792 if (pa_sw_volume_multiply(
1793 i->real_ratio.values[c],
1794 remapped.values[c]) != i->volume.values[c])
1796 i->real_ratio.values[c] = pa_sw_volume_divide(
1797 i->volume.values[c],
1798 remapped.values[c]);
1800 i->soft_volume.values[c] = pa_sw_volume_multiply(
1801 i->real_ratio.values[c],
1802 i->volume_factor.values[c]);
1805 /* We don't copy the soft_volume to the thread_info data
1806 * here. That must be done by the caller */
/* Remap volume 'v' from channel map 'from' to 'to' with minimal impact:
 * if 'template' (already in 'to') round-trips back to 'v', reuse it;
 * if the maps are equal, nothing to do; otherwise flatten to an
 * all-channel volume at v's maximum so per-stream changes stay
 * compensatable across differently-mapped streams. */
1810 static pa_cvolume *cvolume_remap_minimal_impact(
1812 const pa_cvolume *template,
1813 const pa_channel_map *from,
1814 const pa_channel_map *to) {
1819 pa_assert(template);
1822 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1823 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1825 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1826 * mapping from sink input to sink volumes:
1828 * If template is a possible remapping from v it is used instead
1829 * of remapping anew.
1831 * If the channel maps don't match we set an all-channel volume on
1832 * the sink to ensure that changing a volume on one stream has no
1833 * effect that cannot be compensated for in another stream that
1834 * does not have the same channel map as the sink. */
1836 if (pa_channel_map_equal(from, to))
/* 't' is a working copy of template (declaration elided in this excerpt). */
1840 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1845 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
/* Merge into *max_volume the per-channel maximum of all input volumes on
 * this sink (remapped with minimal impact into 'channel_map'), recursing
 * through volume-sharing filter sinks, whose own inputs count instead. */
1849 /* Called from main thread. Only called for the root sink in volume sharing
1850 * cases, except for internal recursive calls. */
1851 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1855 pa_sink_assert_ref(s);
1856 pa_assert(max_volume);
1857 pa_assert(channel_map);
1858 pa_assert(pa_sink_flat_volume_enabled(s));
1860 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1861 pa_cvolume remapped;
1863 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1864 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1865 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1867 /* Ignore this input. The origin sink uses volume sharing, so this
1868 * input's volume will be set to be equal to the root sink's real
1869 * volume. Obviously this input's current volume must not then
1870 * affect what the root sink's real volume will be. */
1874 remapped = i->volume;
1875 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1876 pa_cvolume_merge(max_volume, max_volume, &remapped);
/* Return whether this sink (or, transitively, any volume-sharing filter
 * sink stacked on it) has at least one "real" input that contributes to
 * the flat volume. */
1880 /* Called from main thread. Only called for the root sink in volume sharing
1881 * cases, except for internal recursive calls. */
1882 static bool has_inputs(pa_sink *s) {
1886 pa_sink_assert_ref(s);
1888 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1889 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
/* Set s->real_volume from *new_volume (remapped from 'channel_map' into
 * the sink's own map) and propagate it down the volume-sharing tree:
 * inputs that feed sharing filter sinks are forced to track the root's
 * real volume, then the filter sink itself is updated recursively. */
1896 /* Called from main thread. Only called for the root sink in volume sharing
1897 * cases, except for internal recursive calls. */
1898 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1902 pa_sink_assert_ref(s);
1903 pa_assert(new_volume);
1904 pa_assert(channel_map);
1906 s->real_volume = *new_volume;
1907 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1909 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1910 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1911 if (pa_sink_flat_volume_enabled(s)) {
1912 pa_cvolume new_input_volume;
1914 /* Follow the root sink's real volume. */
1915 new_input_volume = *new_volume;
1916 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
1917 pa_sink_input_set_volume_direct(i, &new_input_volume);
1918 compute_reference_ratio(i);
1921 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1922 update_real_volume(i->origin_sink, new_volume, channel_map);
/* Determine the sink's real volume under flat volume: the per-channel
 * maximum over all connected inputs (or the reference volume if there are
 * none), then refresh every input's real ratio / soft volume. */
1927 /* Called from main thread. Only called for the root sink in shared volume
1929 static void compute_real_volume(pa_sink *s) {
1930 pa_sink_assert_ref(s);
1931 pa_assert_ctl_context();
1932 pa_assert(PA_SINK_IS_LINKED(s->state));
1933 pa_assert(pa_sink_flat_volume_enabled(s));
1934 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
1936 /* This determines the maximum volume of all streams and sets
1937 * s->real_volume accordingly. */
1939 if (!has_inputs(s)) {
1940 /* In the special case that we have no sink inputs we leave the
1941 * volume unmodified. */
1942 update_real_volume(s, &s->reference_volume, &s->channel_map);
/* Start from mute and merge each input's volume in. */
1946 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1948 /* First let's determine the new maximum volume of all inputs
1949 * connected to this sink */
1950 get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
1951 update_real_volume(s, &s->real_volume, &s->channel_map);
1953 /* Then, let's update the real ratios/soft volumes of all inputs
1954 * connected to this sink */
1955 compute_real_ratios(s);
/* After a sink-initiated reference volume change, push the new volume to
 * every input: i->volume = s->reference_volume * i->reference_ratio.
 * Volume-sharing filter sinks recurse; their feeding inputs are fixed up
 * later by update_real_volume(). */
1958 /* Called from main thread. Only called for the root sink in shared volume
1959 * cases, except for internal recursive calls. */
1960 static void propagate_reference_volume(pa_sink *s) {
1964 pa_sink_assert_ref(s);
1965 pa_assert_ctl_context();
1966 pa_assert(PA_SINK_IS_LINKED(s->state));
1967 pa_assert(pa_sink_flat_volume_enabled(s));
1969 /* This is called whenever the sink volume changes that is not
1970 * caused by a sink input volume change. We need to fix up the
1971 * sink input volumes accordingly */
1973 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1974 pa_cvolume new_volume;
1976 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1977 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1978 propagate_reference_volume(i->origin_sink);
1980 /* Since the origin sink uses volume sharing, this input's volume
1981 * needs to be updated to match the root sink's real volume, but
1982 * that will be done later in update_real_volume(). */
1986 /* This basically calculates:
1988 * i->volume := s->reference_volume * i->reference_ratio */
1990 new_volume = s->reference_volume;
1991 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
1992 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
1993 pa_sink_input_set_volume_direct(i, &new_volume);
/* Set the sink's reference volume to *v (remapped into the sink's channel
 * map) and propagate it down the volume-sharing tree. Returns whether any
 * reference volume actually changed; the subtree is still walked even when
 * this sink's value is unchanged if it shares volume with a master, to
 * guard against rounding-induced inconsistencies (see comment below). */
1997 /* Called from main thread. Only called for the root sink in volume sharing
1998 * cases, except for internal recursive calls. The return value indicates
1999 * whether any reference volume actually changed. */
2000 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
2002 bool reference_volume_changed;
2006 pa_sink_assert_ref(s);
2007 pa_assert(PA_SINK_IS_LINKED(s->state));
2009 pa_assert(channel_map);
2010 pa_assert(pa_cvolume_valid(v));
2013 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
2015 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
2016 pa_sink_set_reference_volume_direct(s, &volume);
/* Keep save_volume sticky: an unchanged volume preserves the old flag. */
2018 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
2020 if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2021 /* If the root sink's volume doesn't change, then there can't be any
2022 * changes in the other sinks in the sink tree either.
2024 * It's probably theoretically possible that even if the root sink's
2025 * volume changes slightly, some filter sink doesn't change its volume
2026 * due to rounding errors. If that happens, we still want to propagate
2027 * the changed root sink volume to the sinks connected to the
2028 * intermediate sink that didn't change its volume. This theoretical
2029 * possibility is the reason why we have that !(s->flags &
2030 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2031 * notice even if we returned here false always if
2032 * reference_volume_changed is false. */
2035 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2036 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2037 && PA_SINK_IS_LINKED(i->origin_sink->state))
2038 update_reference_volume(i->origin_sink, v, channel_map, false);
/* Set the sink's volume. With volume != NULL the value becomes the new
 * reference volume (propagated to inputs under flat volume). With
 * volume == NULL (flat volume only) the reference/real volumes are
 * re-synchronized from the current stream volumes. Refused on passthrough
 * sinks unless resetting to 0 dB. Finally the new soft/real volume is
 * pushed to the IO thread via SET_SHARED_VOLUME.
 * NOTE(review): excerpted listing — some branch delimiters and the
 * 'send_msg'/'save' parameter lines are elided. */
2044 /* Called from main thread */
2045 void pa_sink_set_volume(
2047 const pa_cvolume *volume,
2051 pa_cvolume new_reference_volume;
2054 pa_sink_assert_ref(s);
2055 pa_assert_ctl_context();
2056 pa_assert(PA_SINK_IS_LINKED(s->state));
2057 pa_assert(!volume || pa_cvolume_valid(volume));
2058 pa_assert(volume || pa_sink_flat_volume_enabled(s));
2059 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2061 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2062 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2063 if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2064 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input")
2068 /* In case of volume sharing, the volume is set for the root sink first,
2069 * from which it's then propagated to the sharing sinks. */
2070 root_sink = pa_sink_get_master(s);
2072 if (PA_UNLIKELY(!root_sink))
2075 /* As a special exception we accept mono volumes on all sinks --
2076 * even on those with more complex channel maps */
/* Mono volumes are scaled onto the existing reference volume. */
2079 if (pa_cvolume_compatible(volume, &s->sample_spec))
2080 new_reference_volume = *volume;
2082 new_reference_volume = s->reference_volume;
2083 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2086 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2088 if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2089 if (pa_sink_flat_volume_enabled(root_sink)) {
2090 /* OK, propagate this volume change back to the inputs */
2091 propagate_reference_volume(root_sink);
2093 /* And now recalculate the real volume */
2094 compute_real_volume(root_sink);
2096 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2100 /* If volume is NULL we synchronize the sink's real and
2101 * reference volumes with the stream volumes. */
2103 pa_assert(pa_sink_flat_volume_enabled(root_sink));
2105 /* Ok, let's determine the new real volume */
2106 compute_real_volume(root_sink);
2108 /* Let's 'push' the reference volume if necessary */
2109 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2110 /* If the sink and its root don't have the same number of channels, we need to remap */
2111 if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2112 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2113 update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2115 /* Now that the reference volume is updated, we can update the streams'
2116 * reference ratios. */
2117 compute_reference_ratios(root_sink);
2120 if (root_sink->set_volume) {
2121 /* If we have a function set_volume(), then we do not apply a
2122 * soft volume by default. However, set_volume() is free to
2123 * apply one to root_sink->soft_volume */
2125 pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
2126 if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2127 root_sink->set_volume(root_sink);
2130 /* If we have no function set_volume(), then the soft volume
2131 * becomes the real volume */
2132 root_sink->soft_volume = root_sink->real_volume;
2134 /* This tells the sink that soft volume and/or real volume changed */
2136 pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
/* Set the sink's software volume (NULL resets to norm). For deferred-volume
 * sinks this runs in the IO thread and updates thread_info directly; for
 * linked non-deferred sinks a SET_VOLUME message carries it to the IO
 * thread instead. Sink-implementor API only. */
2139 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2140 * Only to be called by sink implementor */
2141 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2143 pa_sink_assert_ref(s);
2144 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* Context assertion depends on which thread is allowed to call us. */
2146 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2147 pa_sink_assert_io_context(s);
2149 pa_assert_ctl_context();
2152 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2154 s->soft_volume = *volume;
2156 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2157 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2159 s->thread_info.soft_volume = s->soft_volume;
/* React to an externally changed hardware volume: adopt the new real
 * volume as reference volume, then (under flat volume) rebuild stream
 * volumes from the now-equal real ratios, recursing through
 * volume-sharing filter sinks. Marks the volume for saving, since such
 * changes are almost certainly user-initiated. */
2162 /* Called from the main thread. Only called for the root sink in volume sharing
2163 * cases, except for internal recursive calls. */
2164 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2168 pa_sink_assert_ref(s);
2169 pa_assert(old_real_volume);
2170 pa_assert_ctl_context();
2171 pa_assert(PA_SINK_IS_LINKED(s->state));
2173 /* This is called when the hardware's real volume changes due to
2174 * some external event. We copy the real volume into our
2175 * reference volume and then rebuild the stream volumes based on
2176 * i->real_ratio which should stay fixed. */
2178 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2179 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2182 /* 1. Make the real volume the reference volume */
2183 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2186 if (pa_sink_flat_volume_enabled(s)) {
2188 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2189 pa_cvolume new_volume;
2191 /* 2. Since the sink's reference and real volumes are equal
2192 * now our ratios should be too. */
2193 pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2195 /* 3. Recalculate the new stream reference volume based on the
2196 * reference ratio and the sink's reference volume.
2198 * This basically calculates:
2200 * i->volume = s->reference_volume * i->reference_ratio
2202 * This is identical to propagate_reference_volume() */
2203 new_volume = s->reference_volume;
2204 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2205 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2206 pa_sink_input_set_volume_direct(i, &new_volume);
2208 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2209 && PA_SINK_IS_LINKED(i->origin_sink->state))
2210 propagate_real_volume(i->origin_sink, old_real_volume);
2214 /* Something got changed in the hardware. It probably makes sense
2215 * to save changed hw settings given that hw volume changes not
2216 * triggered by PA are almost certainly done by the user. */
2217 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2218 s->save_volume = true;
/* Ask the main thread (via the outbound thread-mq) to re-read this sink's
 * hardware volume and mute state; fire-and-forget post. */
2221 /* Called from io thread */
2222 void pa_sink_update_volume_and_mute(pa_sink *s) {
2224 pa_sink_assert_io_context(s);
2226 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
/* Return the sink's reference volume, optionally refreshing the hardware
 * volume first (get_volume() callback or GET_VOLUME message for deferred
 * volume) and propagating any externally changed real volume. */
2229 /* Called from main thread */
2230 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
2231 pa_sink_assert_ref(s);
2232 pa_assert_ctl_context();
2233 pa_assert(PA_SINK_IS_LINKED(s->state));
2235 if (s->refresh_volume || force_refresh) {
2236 struct pa_cvolume old_real_volume;
2238 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2240 old_real_volume = s->real_volume;
2242 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
/* Deferred volume: query the IO thread synchronously instead. */
2245 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2247 update_real_volume(s, &s->real_volume, &s->channel_map);
2248 propagate_real_volume(s, &old_real_volume);
2251 return &s->reference_volume;
/* Sink-implementor notification that the hardware volume changed: adopt
 * the new real volume and propagate it (reference volume, streams,
 * sharing tree). */
2254 /* Called from main thread. In volume sharing cases, only the root sink may
2256 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2257 pa_cvolume old_real_volume;
2259 pa_sink_assert_ref(s);
2260 pa_assert_ctl_context();
2261 pa_assert(PA_SINK_IS_LINKED(s->state));
2262 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2264 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2266 old_real_volume = s->real_volume;
2267 update_real_volume(s, new_real_volume, &s->channel_map);
2268 propagate_real_volume(s, &old_real_volume);
2271 /* Called from main thread */
2272 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2275 pa_sink_assert_ref(s);
2276 pa_assert_ctl_context();
2278 old_muted = s->muted;
2280 if (mute == old_muted) {
2281 s->save_muted |= save;
2286 s->save_muted = save;
2288 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
2289 s->set_mute_in_progress = true;
2291 s->set_mute_in_progress = false;
2294 if (!PA_SINK_IS_LINKED(s->state))
2297 pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
2298 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2299 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2300 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2303 /* Called from main thread */
2304 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2306 pa_sink_assert_ref(s);
2307 pa_assert_ctl_context();
2308 pa_assert(PA_SINK_IS_LINKED(s->state));
2310 if ((s->refresh_muted || force_refresh) && s->get_mute) {
2313 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2314 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2315 pa_sink_mute_changed(s, mute);
2317 if (s->get_mute(s, &mute) >= 0)
2318 pa_sink_mute_changed(s, mute);
2325 /* Called from main thread */
2326 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2327 pa_sink_assert_ref(s);
2328 pa_assert_ctl_context();
2329 pa_assert(PA_SINK_IS_LINKED(s->state));
2331 if (s->set_mute_in_progress)
2334 /* pa_sink_set_mute() does this same check, so this may appear redundant,
2335 * but we must have this here also, because the save parameter of
2336 * pa_sink_set_mute() would otherwise have unintended side effects (saving
2337 * the mute state when it shouldn't be saved). */
2338 if (new_muted == s->muted)
2341 pa_sink_set_mute(s, new_muted, true);
2344 /* Called from main thread */
2345 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2346 pa_sink_assert_ref(s);
2347 pa_assert_ctl_context();
2350 pa_proplist_update(s->proplist, mode, p);
2352 if (PA_SINK_IS_LINKED(s->state)) {
2353 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2354 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2360 /* Called from main thread */
2361 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
2362 void pa_sink_set_description(pa_sink *s, const char *description) {
2364 pa_sink_assert_ref(s);
2365 pa_assert_ctl_context();
2367 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2370 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2372 if (old && description && pa_streq(old, description))
2376 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2378 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2380 if (s->monitor_source) {
2383 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2384 pa_source_set_description(s->monitor_source, n);
2388 if (PA_SINK_IS_LINKED(s->state)) {
2389 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2390 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2394 /* Called from main thread */
2395 unsigned pa_sink_linked_by(pa_sink *s) {
2398 pa_sink_assert_ref(s);
2399 pa_assert_ctl_context();
2400 pa_assert(PA_SINK_IS_LINKED(s->state));
2402 ret = pa_idxset_size(s->inputs);
2404 /* We add in the number of streams connected to us here. Please
2405 * note the asymmetry to pa_sink_used_by()! */
2407 if (s->monitor_source)
2408 ret += pa_source_linked_by(s->monitor_source);
2413 /* Called from main thread */
2414 unsigned pa_sink_used_by(pa_sink *s) {
2417 pa_sink_assert_ref(s);
2418 pa_assert_ctl_context();
2419 pa_assert(PA_SINK_IS_LINKED(s->state));
2421 ret = pa_idxset_size(s->inputs);
2422 pa_assert(ret >= s->n_corked);
2424 /* Streams connected to our monitor source do not matter for
2425 * pa_sink_used_by()!.*/
2427 return ret - s->n_corked;
2430 /* Called from main thread */
2431 unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
2436 pa_sink_assert_ref(s);
2437 pa_assert_ctl_context();
2439 if (!PA_SINK_IS_LINKED(s->state))
2444 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2445 pa_sink_input_state_t st;
2447 if (i == ignore_input)
2450 st = pa_sink_input_get_state(i);
2452 /* We do not assert here. It is perfectly valid for a sink input to
2453 * be in the INIT state (i.e. created, marked done but not yet put)
2454 * and we should not care if it's unlinked as it won't contribute
2455 * towards our busy status.
2457 if (!PA_SINK_INPUT_IS_LINKED(st))
2460 if (st == PA_SINK_INPUT_CORKED)
2463 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2469 if (s->monitor_source)
2470 ret += pa_source_check_suspend(s->monitor_source, ignore_output);
2475 const char *pa_sink_state_to_string(pa_sink_state_t state) {
2477 case PA_SINK_INIT: return "INIT";
2478 case PA_SINK_IDLE: return "IDLE";
2479 case PA_SINK_RUNNING: return "RUNNING";
2480 case PA_SINK_SUSPENDED: return "SUSPENDED";
2481 case PA_SINK_UNLINKED: return "UNLINKED";
2482 case PA_SINK_INVALID_STATE: return "INVALID_STATE";
2485 pa_assert_not_reached();
2488 /* Called from the IO thread */
2489 static void sync_input_volumes_within_thread(pa_sink *s) {
2493 pa_sink_assert_ref(s);
2494 pa_sink_assert_io_context(s);
2496 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2497 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2500 i->thread_info.soft_volume = i->soft_volume;
2501 pa_sink_input_request_rewind(i, 0, true, false, false);
2505 /* Called from the IO thread. Only called for the root sink in volume sharing
2506 * cases, except for internal recursive calls. */
2507 static void set_shared_volume_within_thread(pa_sink *s) {
2508 pa_sink_input *i = NULL;
2511 pa_sink_assert_ref(s);
2513 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2515 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2516 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2517 set_shared_volume_within_thread(i->origin_sink);
2521 /* Called from IO thread, except when it is not */
/* Central message dispatcher for the sink: handles sink-input add/remove,
 * stream moves between sinks, volume/mute synchronization, state changes and
 * latency queries. Most messages are sent from the main thread via
 * pa_asyncmsgq; UPDATE_VOLUME_AND_MUTE travels the other way and is handled
 * in the main thread — hence the tongue-in-cheek comment above.
 * NOTE(review): this copy of the file is missing lines (returns/braces were
 * lost in extraction); code below is kept byte-identical on purpose. */
2522 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2523 pa_sink *s = PA_SINK(o);
2524 pa_sink_assert_ref(s);
2526 switch ((pa_sink_message_t) code) {
2528 case PA_SINK_MESSAGE_ADD_INPUT: {
2529 pa_sink_input *i = PA_SINK_INPUT(userdata);
2531 /* If you change anything here, make sure to change the
2532 * sink input handling a few lines down at
2533 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2535 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2537 /* Since the caller sleeps in pa_sink_input_put(), we can
2538 * safely access data outside of thread_info even though
2541 if ((i->thread_info.sync_prev = i->sync_prev)) {
2542 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2543 pa_assert(i->sync_prev->sync_next == i);
2544 i->thread_info.sync_prev->thread_info.sync_next = i;
2547 if ((i->thread_info.sync_next = i->sync_next)) {
2548 pa_assert(i->sink == i->thread_info.sync_next->sink);
2549 pa_assert(i->sync_next->sync_prev == i);
2550 i->thread_info.sync_next->thread_info.sync_prev = i;
2553 pa_sink_input_attach(i);
2555 pa_sink_input_set_state_within_thread(i, i->state);
2557 /* The requested latency of the sink input needs to be fixed up and
2558 * then configured on the sink. If this causes the sink latency to
2559 * go down, the sink implementor is responsible for doing a rewind
2560 * in the update_requested_latency() callback to ensure that the
2561 * sink buffer doesn't contain more data than what the new latency
2564 * XXX: Does it really make sense to push this responsibility to
2565 * the sink implementors? Wouldn't it be better to do it once in
2566 * the core than many times in the modules? */
2568 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2569 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2571 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2572 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2574 /* We don't rewind here automatically. This is left to the
2575 * sink input implementor because some sink inputs need a
2576 * slow start, i.e. need some time to buffer client
2577 * samples before beginning streaming.
2579 * XXX: Does it really make sense to push this functionality to
2580 * the sink implementors? Wouldn't it be better to do it once in
2581 * the core than many times in the modules? */
2583 /* In flat volume mode we need to update the volume as
2585 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2588 case PA_SINK_MESSAGE_REMOVE_INPUT: {
2589 pa_sink_input *i = PA_SINK_INPUT(userdata);
2591 /* If you change anything here, make sure to change the
2592 * sink input handling a few lines down at
2593 * PA_SINK_MESSAGE_START_MOVE, too. */
2595 pa_sink_input_detach(i);
2597 pa_sink_input_set_state_within_thread(i, i->state);
2599 /* Since the caller sleeps in pa_sink_input_unlink(),
2600 * we can safely access data outside of thread_info even
2601 * though it is mutable */
2603 pa_assert(!i->sync_prev);
2604 pa_assert(!i->sync_next);
2606 if (i->thread_info.sync_prev) {
2607 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2608 i->thread_info.sync_prev = NULL;
2611 if (i->thread_info.sync_next) {
2612 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2613 i->thread_info.sync_next = NULL;
2616 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2617 pa_sink_invalidate_requested_latency(s, true);
2618 pa_sink_request_rewind(s, (size_t) -1);
2620 /* In flat volume mode we need to update the volume as
2622 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2625 case PA_SINK_MESSAGE_START_MOVE: {
2626 pa_sink_input *i = PA_SINK_INPUT(userdata);
2628 /* We don't support moving synchronized streams. */
2629 pa_assert(!i->sync_prev);
2630 pa_assert(!i->sync_next);
2631 pa_assert(!i->thread_info.sync_next);
2632 pa_assert(!i->thread_info.sync_prev);
2634 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2636 size_t sink_nbytes, total_nbytes;
2638 /* The old sink probably has some audio from this
2639 * stream in its buffer. We want to "take it back" as
2640 * much as possible and play it to the new sink. We
2641 * don't know at this point how much the old sink can
2642 * rewind. We have to pick something, and that
2643 * something is the full latency of the old sink here.
2644 * So we rewind the stream buffer by the sink latency
2645 * amount, which may be more than what we should
2646 * rewind. This can result in a chunk of audio being
2647 * played both to the old sink and the new sink.
2649 * FIXME: Fix this code so that we don't have to make
2650 * guesses about how much the sink will actually be
2651 * able to rewind. If someone comes up with a solution
2652 * for this, something to note is that the part of the
2653 * latency that the old sink couldn't rewind should
2654 * ideally be compensated after the stream has moved
2655 * to the new sink by adding silence. The new sink
2656 * most likely can't start playing the moved stream
2657 * immediately, and that gap should be removed from
2658 * the "compensation silence" (at least at the time of
2659 * writing this, the move finish code will actually
2660 * already take care of dropping the new sink's
2661 * unrewindable latency, so taking into account the
2662 * unrewindable latency of the old sink is the only
2665 * The render_memblockq contents are discarded,
2666 * because when the sink changes, the format of the
2667 * audio stored in the render_memblockq may change
2668 * too, making the stored audio invalid. FIXME:
2669 * However, the read and write indices are moved back
2670 * the same amount, so if they are not the same now,
2671 * they won't be the same after the rewind either. If
2672 * the write index of the render_memblockq is ahead of
2673 * the read index, then the render_memblockq will feed
2674 * the new sink some silence first, which it shouldn't
2675 * do. The write index should be flushed to be the
2676 * same as the read index. */
2678 /* Get the latency of the sink */
2679 usec = pa_sink_get_latency_within_thread(s, false);
2680 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2681 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
2683 if (total_nbytes > 0) {
2684 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
2685 i->thread_info.rewrite_flush = true;
2686 pa_sink_input_process_rewind(i, sink_nbytes);
2690 pa_sink_input_detach(i);
2692 /* Let's remove the sink input ...*/
2693 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2695 pa_sink_invalidate_requested_latency(s, true);
2697 pa_log_debug("Requesting rewind due to started move");
2698 pa_sink_request_rewind(s, (size_t) -1);
2700 /* In flat volume mode we need to update the volume as
2702 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2705 case PA_SINK_MESSAGE_FINISH_MOVE: {
2706 pa_sink_input *i = PA_SINK_INPUT(userdata);
2708 /* We don't support moving synchronized streams. */
2709 pa_assert(!i->sync_prev);
2710 pa_assert(!i->sync_next);
2711 pa_assert(!i->thread_info.sync_next);
2712 pa_assert(!i->thread_info.sync_prev);
2714 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2716 pa_sink_input_attach(i);
2718 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2722 /* In the ideal case the new sink would start playing
2723 * the stream immediately. That requires the sink to
2724 * be able to rewind all of its latency, which usually
2725 * isn't possible, so there will probably be some gap
2726 * before the moved stream becomes audible. We then
2727 * have two possibilities: 1) start playing the stream
2728 * from where it is now, or 2) drop the unrewindable
2729 * latency of the sink from the stream. With option 1
2730 * we won't lose any audio but the stream will have a
2731 * pause. With option 2 we may lose some audio but the
2732 * stream time will be somewhat in sync with the wall
2733 * clock. Lennart seems to have chosen option 2 (one
2734 * of the reasons might have been that option 1 is
2735 * actually much harder to implement), so we drop the
2736 * latency of the new sink from the moved stream and
2737 * hope that the sink will undo most of that in the
2740 /* Get the latency of the sink */
2741 usec = pa_sink_get_latency_within_thread(s, false);
2742 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2745 pa_sink_input_drop(i, nbytes);
2747 pa_log_debug("Requesting rewind due to finished move");
2748 pa_sink_request_rewind(s, nbytes);
2751 /* Updating the requested sink latency has to be done
2752 * after the sink rewind request, not before, because
2753 * otherwise the sink may limit the rewind amount
2756 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2757 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2759 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2760 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2762 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2765 case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
2766 pa_sink *root_sink = pa_sink_get_master(s);
2768 if (PA_LIKELY(root_sink))
2769 set_shared_volume_within_thread(root_sink);
2774 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2776 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2778 pa_sink_volume_change_push(s);
2780 /* Fall through ... */
2782 case PA_SINK_MESSAGE_SET_VOLUME:
2784 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2785 s->thread_info.soft_volume = s->soft_volume;
2786 pa_sink_request_rewind(s, (size_t) -1);
2789 /* Fall through ... */
2791 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2792 sync_input_volumes_within_thread(s);
2795 case PA_SINK_MESSAGE_GET_VOLUME:
2797 if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
2799 pa_sink_volume_change_flush(s);
2800 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2803 /* In case sink implementor reset SW volume. */
2804 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2805 s->thread_info.soft_volume = s->soft_volume;
2806 pa_sink_request_rewind(s, (size_t) -1);
2811 case PA_SINK_MESSAGE_SET_MUTE:
2813 if (s->thread_info.soft_muted != s->muted) {
2814 s->thread_info.soft_muted = s->muted;
2815 pa_sink_request_rewind(s, (size_t) -1);
2818 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2823 case PA_SINK_MESSAGE_GET_MUTE:
2825 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2826 return s->get_mute(s, userdata);
2830 case PA_SINK_MESSAGE_SET_STATE: {
2832 bool suspend_change =
2833 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2834 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2836 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2838 if (s->thread_info.state == PA_SINK_SUSPENDED) {
2839 s->thread_info.rewind_nbytes = 0;
2840 s->thread_info.rewind_requested = false;
2843 if (suspend_change) {
2847 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2848 if (i->suspend_within_thread)
2849 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2855 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2857 pa_usec_t *usec = userdata;
2858 *usec = pa_sink_get_requested_latency_within_thread(s);
2860 /* Yes, that's right, the IO thread will see -1 when no
2861 * explicit requested latency is configured, the main
2862 * thread will see max_latency */
2863 if (*usec == (pa_usec_t) -1)
2864 *usec = s->thread_info.max_latency;
2869 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2870 pa_usec_t *r = userdata;
2872 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2877 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2878 pa_usec_t *r = userdata;
2880 r[0] = s->thread_info.min_latency;
2881 r[1] = s->thread_info.max_latency;
2886 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2888 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2891 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2893 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2896 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2898 *((size_t*) userdata) = s->thread_info.max_rewind;
2901 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2903 *((size_t*) userdata) = s->thread_info.max_request;
2906 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2908 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2911 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2913 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2916 case PA_SINK_MESSAGE_SET_PORT:
2918 pa_assert(userdata);
2920 struct sink_message_set_port *msg_data = userdata;
2921 msg_data->ret = s->set_port(s, msg_data->port);
2925 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2926 /* This message is sent from IO-thread and handled in main thread. */
2927 pa_assert_ctl_context();
2929 /* Make sure we're not messing with main thread when no longer linked */
2930 if (!PA_SINK_IS_LINKED(s->state))
2933 pa_sink_get_volume(s, true);
2934 pa_sink_get_mute(s, true);
2937 case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
2938 s->thread_info.port_latency_offset = offset;
2941 case PA_SINK_MESSAGE_GET_LATENCY:
2942 case PA_SINK_MESSAGE_MAX:
2949 /* Called from main thread */
2950 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
2955 pa_core_assert_ref(c);
2956 pa_assert_ctl_context();
2957 pa_assert(cause != 0);
2959 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2962 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2969 /* Called from IO thread */
2970 void pa_sink_detach_within_thread(pa_sink *s) {
2974 pa_sink_assert_ref(s);
2975 pa_sink_assert_io_context(s);
2976 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2978 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2979 pa_sink_input_detach(i);
2981 if (s->monitor_source)
2982 pa_source_detach_within_thread(s->monitor_source);
2985 /* Called from IO thread */
2986 void pa_sink_attach_within_thread(pa_sink *s) {
2990 pa_sink_assert_ref(s);
2991 pa_sink_assert_io_context(s);
2992 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2994 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2995 pa_sink_input_attach(i);
2997 if (s->monitor_source)
2998 pa_source_attach_within_thread(s->monitor_source);
3001 /* Called from IO thread */
3002 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
3003 pa_sink_assert_ref(s);
3004 pa_sink_assert_io_context(s);
3005 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3007 if (nbytes == (size_t) -1)
3008 nbytes = s->thread_info.max_rewind;
3010 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
3012 if (s->thread_info.rewind_requested &&
3013 nbytes <= s->thread_info.rewind_nbytes)
3016 s->thread_info.rewind_nbytes = nbytes;
3017 s->thread_info.rewind_requested = true;
3019 if (s->request_rewind)
3020 s->request_rewind(s);
3023 /* Called from IO thread */
3024 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
3025 pa_usec_t result = (pa_usec_t) -1;
3028 pa_usec_t monitor_latency;
3030 pa_sink_assert_ref(s);
3031 pa_sink_assert_io_context(s);
3033 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3034 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
3036 if (s->thread_info.requested_latency_valid)
3037 return s->thread_info.requested_latency;
3039 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3040 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3041 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3042 result = i->thread_info.requested_sink_latency;
3044 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3046 if (monitor_latency != (pa_usec_t) -1 &&
3047 (result == (pa_usec_t) -1 || result > monitor_latency))
3048 result = monitor_latency;
3050 if (result != (pa_usec_t) -1)
3051 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3053 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3054 /* Only cache if properly initialized */
3055 s->thread_info.requested_latency = result;
3056 s->thread_info.requested_latency_valid = true;
3062 /* Called from main thread */
3063 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3066 pa_sink_assert_ref(s);
3067 pa_assert_ctl_context();
3068 pa_assert(PA_SINK_IS_LINKED(s->state));
3070 if (s->state == PA_SINK_SUSPENDED)
3073 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3078 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3079 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3083 pa_sink_assert_ref(s);
3084 pa_sink_assert_io_context(s);
3086 if (max_rewind == s->thread_info.max_rewind)
3089 s->thread_info.max_rewind = max_rewind;
3091 if (PA_SINK_IS_LINKED(s->thread_info.state))
3092 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3093 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3095 if (s->monitor_source)
3096 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3099 /* Called from main thread */
3100 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3101 pa_sink_assert_ref(s);
3102 pa_assert_ctl_context();
3104 if (PA_SINK_IS_LINKED(s->state))
3105 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3107 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3110 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3111 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3114 pa_sink_assert_ref(s);
3115 pa_sink_assert_io_context(s);
3117 if (max_request == s->thread_info.max_request)
3120 s->thread_info.max_request = max_request;
3122 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3125 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3126 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3130 /* Called from main thread */
3131 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3132 pa_sink_assert_ref(s);
3133 pa_assert_ctl_context();
3135 if (PA_SINK_IS_LINKED(s->state))
3136 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3138 pa_sink_set_max_request_within_thread(s, max_request);
3141 /* Called from IO thread */
3142 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
3146 pa_sink_assert_ref(s);
3147 pa_sink_assert_io_context(s);
3149 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3150 s->thread_info.requested_latency_valid = false;
3154 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3156 if (s->update_requested_latency)
3157 s->update_requested_latency(s);
3159 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3160 if (i->update_sink_requested_latency)
3161 i->update_sink_requested_latency(i);
3165 /* Called from main thread */
3166 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3167 pa_sink_assert_ref(s);
3168 pa_assert_ctl_context();
3170 /* min_latency == 0: no limit
3171 * min_latency anything else: specified limit
3173 * Similar for max_latency */
3175 if (min_latency < ABSOLUTE_MIN_LATENCY)
3176 min_latency = ABSOLUTE_MIN_LATENCY;
3178 if (max_latency <= 0 ||
3179 max_latency > ABSOLUTE_MAX_LATENCY)
3180 max_latency = ABSOLUTE_MAX_LATENCY;
3182 pa_assert(min_latency <= max_latency);
3184 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3185 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3186 max_latency == ABSOLUTE_MAX_LATENCY) ||
3187 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3189 if (PA_SINK_IS_LINKED(s->state)) {
3195 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3197 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3200 /* Called from main thread */
3201 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3202 pa_sink_assert_ref(s);
3203 pa_assert_ctl_context();
3204 pa_assert(min_latency);
3205 pa_assert(max_latency);
3207 if (PA_SINK_IS_LINKED(s->state)) {
3208 pa_usec_t r[2] = { 0, 0 };
3210 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3212 *min_latency = r[0];
3213 *max_latency = r[1];
3215 *min_latency = s->thread_info.min_latency;
3216 *max_latency = s->thread_info.max_latency;
3220 /* Called from IO thread */
3221 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3222 pa_sink_assert_ref(s);
3223 pa_sink_assert_io_context(s);
3225 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3226 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3227 pa_assert(min_latency <= max_latency);
3229 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3230 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3231 max_latency == ABSOLUTE_MAX_LATENCY) ||
3232 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3234 if (s->thread_info.min_latency == min_latency &&
3235 s->thread_info.max_latency == max_latency)
3238 s->thread_info.min_latency = min_latency;
3239 s->thread_info.max_latency = max_latency;
3241 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3245 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3246 if (i->update_sink_latency_range)
3247 i->update_sink_latency_range(i);
3250 pa_sink_invalidate_requested_latency(s, false);
3252 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3255 /* Called from main thread */
3256 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3257 pa_sink_assert_ref(s);
3258 pa_assert_ctl_context();
3260 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3261 pa_assert(latency == 0);
3265 if (latency < ABSOLUTE_MIN_LATENCY)
3266 latency = ABSOLUTE_MIN_LATENCY;
3268 if (latency > ABSOLUTE_MAX_LATENCY)
3269 latency = ABSOLUTE_MAX_LATENCY;
3271 if (PA_SINK_IS_LINKED(s->state))
3272 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3274 s->thread_info.fixed_latency = latency;
3276 pa_source_set_fixed_latency(s->monitor_source, latency);
3279 /* Called from main thread */
3280 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3283 pa_sink_assert_ref(s);
3284 pa_assert_ctl_context();
3286 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3289 if (PA_SINK_IS_LINKED(s->state))
3290 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3292 latency = s->thread_info.fixed_latency;
3297 /* Called from IO thread */
3298 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3299 pa_sink_assert_ref(s);
3300 pa_sink_assert_io_context(s);
3302 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3303 pa_assert(latency == 0);
3304 s->thread_info.fixed_latency = 0;
3306 if (s->monitor_source)
3307 pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
3312 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3313 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3315 if (s->thread_info.fixed_latency == latency)
3318 s->thread_info.fixed_latency = latency;
3320 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3324 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3325 if (i->update_sink_fixed_latency)
3326 i->update_sink_fixed_latency(i);
3329 pa_sink_invalidate_requested_latency(s, false);
3331 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3334 /* Called from main context */
3335 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3336 pa_sink_assert_ref(s);
3338 s->port_latency_offset = offset;
3340 if (PA_SINK_IS_LINKED(s->state))
3341 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3343 s->thread_info.port_latency_offset = offset;
3345 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3348 /* Called from main context */
3349 size_t pa_sink_get_max_rewind(pa_sink *s) {
3351 pa_assert_ctl_context();
3352 pa_sink_assert_ref(s);
3354 if (!PA_SINK_IS_LINKED(s->state))
3355 return s->thread_info.max_rewind;
3357 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3362 /* Called from main context */
3363 size_t pa_sink_get_max_request(pa_sink *s) {
3365 pa_sink_assert_ref(s);
3366 pa_assert_ctl_context();
3368 if (!PA_SINK_IS_LINKED(s->state))
3369 return s->thread_info.max_request;
3371 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3376 /* Called from main context */
3377 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3378 pa_device_port *port;
3381 pa_sink_assert_ref(s);
3382 pa_assert_ctl_context();
3385 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3386 return -PA_ERR_NOTIMPLEMENTED;
3390 return -PA_ERR_NOENTITY;
3392 if (!(port = pa_hashmap_get(s->ports, name)))
3393 return -PA_ERR_NOENTITY;
3395 if (s->active_port == port) {
3396 s->save_port = s->save_port || save;
3400 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
3401 struct sink_message_set_port msg = { .port = port, .ret = 0 };
3402 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
3406 ret = s->set_port(s, port);
3409 return -PA_ERR_NOENTITY;
3411 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3413 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3415 s->active_port = port;
3416 s->save_port = save;
3418 pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3420 /* The active port affects the default sink selection. */
3421 pa_core_update_default_sink(s->core);
3423 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
3428 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3429 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3433 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3436 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3438 if (pa_streq(ff, "microphone"))
3439 t = "audio-input-microphone";
3440 else if (pa_streq(ff, "webcam"))
3442 else if (pa_streq(ff, "computer"))
3444 else if (pa_streq(ff, "handset"))
3446 else if (pa_streq(ff, "portable"))
3447 t = "multimedia-player";
3448 else if (pa_streq(ff, "tv"))
3449 t = "video-display";
3452 * The following icons are not part of the icon naming spec,
3453 * because Rodney Dawes sucks as the maintainer of that spec.
3455 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3457 else if (pa_streq(ff, "headset"))
3458 t = "audio-headset";
3459 else if (pa_streq(ff, "headphone"))
3460 t = "audio-headphones";
3461 else if (pa_streq(ff, "speaker"))
3462 t = "audio-speakers";
3463 else if (pa_streq(ff, "hands-free"))
3464 t = "audio-handsfree";
3468 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3469 if (pa_streq(c, "modem"))
3476 t = "audio-input-microphone";
3479 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3480 if (strstr(profile, "analog"))
3482 else if (strstr(profile, "iec958"))
3484 else if (strstr(profile, "hdmi"))
3488 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3490 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3495 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3496 const char *s, *d = NULL, *k;
3499 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3503 if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3507 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3508 if (pa_streq(s, "internal"))
3509 d = _("Built-in Audio");
3512 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3513 if (pa_streq(s, "modem"))
3517 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3522 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3525 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3527 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3532 bool pa_device_init_intended_roles(pa_proplist *p) {
3536 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3539 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3540 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3541 || pa_streq(s, "headset")) {
3542 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3549 unsigned pa_device_init_priority(pa_proplist *p) {
3551 unsigned priority = 0;
3555 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3557 if (pa_streq(s, "sound"))
3559 else if (!pa_streq(s, "modem"))
3563 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3565 if (pa_streq(s, "headphone"))
3567 else if (pa_streq(s, "hifi"))
3569 else if (pa_streq(s, "speaker"))
3571 else if (pa_streq(s, "portable"))
3575 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3577 if (pa_streq(s, "bluetooth"))
3579 else if (pa_streq(s, "usb"))
3581 else if (pa_streq(s, "pci"))
3585 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3587 if (pa_startswith(s, "analog-"))
3589 else if (pa_startswith(s, "iec958-"))
3596 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3598 /* Called from the IO thread. */
3599 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3600 pa_sink_volume_change *c;
3601 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3602 c = pa_xnew(pa_sink_volume_change, 1);
3604 PA_LLIST_INIT(pa_sink_volume_change, c);
3606 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3610 /* Called from the IO thread. */
3611 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3613 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3617 /* Called from the IO thread. */
/* Queues a deferred hardware volume change so that it takes effect when the
 * currently buffered audio actually reaches the speakers (i.e. after the
 * sink latency plus a driver-specific extra delay), instead of immediately.
 * Up-going changes are scheduled slightly late and down-going changes
 * slightly early (by `safety_margin`) so transient clipping is avoided.
 *
 * NOTE(review): several control-flow lines (breaks, closing braces, the
 * branch that prepends when no insertion point was found) appear to be
 * missing from this copy of the file — compare against upstream
 * PulseAudio's src/pulsecore/sink.c before relying on this text. */
3618 void pa_sink_volume_change_push(pa_sink *s) {
3619 pa_sink_volume_change *c = NULL;
3620 pa_sink_volume_change *nc = NULL;
3621 pa_sink_volume_change *pc = NULL;
3622 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3624 const char *direction = NULL;
3627 nc = pa_sink_volume_change_new(s);
3629 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3630 * Adding one more volume for HW would get us rid of this, but I am trying
3631 * to survive with the ones we already have. */
/* The HW volume to program is reference (real) volume with the software
 * attenuation factored out. */
3632 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
/* Nothing queued and HW volume already matches -> drop the no-op change. */
3634 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3635 pa_log_debug("Volume not changing");
3636 pa_sink_volume_change_free(nc);
/* Target time = now + current sink latency + configured extra delay. */
3640 nc->at = pa_sink_get_latency_within_thread(s, false);
3641 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
/* Walk the queue from the tail to find where this change belongs,
 * nudging its time by the safety margin relative to its neighbor. */
3643 if (s->thread_info.volume_changes_tail) {
3644 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3645 /* If volume is going up let's do it a bit late. If it is going
3646 * down let's do it a bit early. */
3647 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3648 if (nc->at + safety_margin > c->at) {
3649 nc->at += safety_margin;
3654 else if (nc->at - safety_margin > c->at) {
3655 nc->at -= safety_margin;
/* No insertion point found: compare against the currently applied HW
 * volume to pick the direction and prepend at the head. */
3663 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3664 nc->at += safety_margin;
3667 nc->at -= safety_margin;
3670 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3673 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3676 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3678 /* We can ignore volume events that came earlier but should happen later than this. */
3679 PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
3680 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3681 pa_sink_volume_change_free(c);
/* The new change is now the last scheduled one. */
3684 s->thread_info.volume_changes_tail = nc;
3687 /* Called from the IO thread. */
3688 static void pa_sink_volume_change_flush(pa_sink *s) {
3689 pa_sink_volume_change *c = s->thread_info.volume_changes;
3691 s->thread_info.volume_changes = NULL;
3692 s->thread_info.volume_changes_tail = NULL;
3694 pa_sink_volume_change *next = c->next;
3695 pa_sink_volume_change_free(c);
3700 /* Called from the IO thread. */
3701 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3707 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3713 pa_assert(s->write_volume);
3715 now = pa_rtclock_now();
3717 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3718 pa_sink_volume_change *c = s->thread_info.volume_changes;
3719 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3720 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3721 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3723 s->thread_info.current_hw_volume = c->hw_volume;
3724 pa_sink_volume_change_free(c);
3730 if (s->thread_info.volume_changes) {
3732 *usec_to_next = s->thread_info.volume_changes->at - now;
3733 if (pa_log_ratelimit(PA_LOG_DEBUG))
3734 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3739 s->thread_info.volume_changes_tail = NULL;
3744 /* Called from the IO thread. */
3745 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3746 /* All the queued volume events later than current latency are shifted to happen earlier. */
3747 pa_sink_volume_change *c;
3748 pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3749 pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3750 pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
3752 pa_log_debug("latency = %lld", (long long) limit);
3753 limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3755 PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3756 pa_usec_t modified_limit = limit;
3757 if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3758 modified_limit -= s->thread_info.volume_change_safety_margin;
3760 modified_limit += s->thread_info.volume_change_safety_margin;
3761 if (c->at > modified_limit) {
3763 if (c->at < modified_limit)
3764 c->at = modified_limit;
3766 prev_vol = pa_cvolume_avg(&c->hw_volume);
3768 pa_sink_volume_change_apply(s, NULL);
3771 /* Called from the main thread */
3772 /* Gets the list of formats supported by the sink. The members and idxset must
3773 * be freed by the caller. */
3774 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3779 if (s->get_formats) {
3780 /* Sink supports format query, all is good */
3781 ret = s->get_formats(s);
3783 /* Sink doesn't support format query, so assume it does PCM */
3784 pa_format_info *f = pa_format_info_new();
3785 f->encoding = PA_ENCODING_PCM;
3787 ret = pa_idxset_new(NULL, NULL);
3788 pa_idxset_put(ret, f, NULL);
3794 /* Called from the main thread */
3795 /* Allows an external source to set what formats a sink supports if the sink
3796 * permits this. The function makes a copy of the formats on success. */
3797 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3802 /* Sink supports setting formats -- let's give it a shot */
3803 return s->set_formats(s, formats);
3805 /* Sink doesn't support setting this -- bail out */
3809 /* Called from the main thread */
3810 /* Checks if the sink can accept this format */
3811 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3812 pa_idxset *formats = NULL;
3818 formats = pa_sink_get_formats(s);
3821 pa_format_info *finfo_device;
3824 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3825 if (pa_format_info_is_compatible(finfo_device, f)) {
3831 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3837 /* Called from the main thread */
3838 /* Calculates the intersection between formats supported by the sink and
3839 * in_formats, and returns these, in the order of the sink's formats. */
3840 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3841 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3842 pa_format_info *f_sink, *f_in;
3847 if (!in_formats || pa_idxset_isempty(in_formats))
3850 sink_formats = pa_sink_get_formats(s);
3852 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3853 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3854 if (pa_format_info_is_compatible(f_sink, f_in))
3855 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3861 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3866 /* Called from the main thread. */
3867 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
3868 pa_cvolume old_volume;
3869 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3870 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3875 old_volume = s->reference_volume;
3877 if (pa_cvolume_equal(volume, &old_volume))
3880 s->reference_volume = *volume;
3881 pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
3882 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
3883 s->flags & PA_SINK_DECIBEL_VOLUME),
3884 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
3885 s->flags & PA_SINK_DECIBEL_VOLUME));
3887 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3888 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);