2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
29 #include <pulse/introspect.h>
30 #include <pulse/format.h>
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/sink-input.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-util.h>
42 #include <pulsecore/sample-util.h>
43 #include <pulsecore/mix.h>
44 #include <pulsecore/core-subscribe.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/play-memblockq.h>
48 #include <pulsecore/flist.h>
/* Maximum number of sink inputs that can be mixed into one render pass. */
52 #define MAX_MIX_CHANNELS 32
/* Size of the mixing buffer — one memory page. */
53 #define MIX_BUFFER_LENGTH (pa_page_size())
/* Hard lower/upper bounds on sink latency, in microseconds. */
54 #define ABSOLUTE_MIN_LATENCY (500)
55 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
/* Latency used for sinks that do not support dynamic latency. */
56 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* Register pa_sink as a public subclass of pa_msgobject. */
58 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
/* One queued hardware-volume change; linked into the sink's per-thread
 * volume-change list (fields elided in this excerpt). */
60 struct pa_sink_volume_change {
64 PA_LLIST_FIELDS(pa_sink_volume_change);
/* Message payload used to ask the IO thread to switch the active port
 * (members elided in this excerpt). */
67 struct sink_message_set_port {
/* Forward declarations for file-local helpers defined below. */
72 static void sink_free(pa_object *s);
74 static void pa_sink_volume_change_push(pa_sink *s);
75 static void pa_sink_volume_change_flush(pa_sink *s);
76 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/* Initialize a pa_sink_new_data structure for use with pa_sink_new().
 * Allocates a fresh property list and an empty port hashmap (the hashmap
 * unrefs its pa_device_port values on removal). Returns 'data' for
 * call chaining. Pair with pa_sink_new_data_done() to release. */
78 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
82 data->proplist = pa_proplist_new();
83 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
/* Store a copy of 'name' in the new-data struct; the duplicate is owned by
 * 'data' and released by pa_sink_new_data_done(). */
88 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
92 data->name = pa_xstrdup(name);
/* Copy the sample spec into the new-data struct. A NULL 'spec' clears the
 * sample_spec_is_set flag instead (the assignment-in-condition records
 * whether a spec was supplied). */
95 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
98 if ((data->sample_spec_is_set = !!spec))
99 data->sample_spec = *spec;
/* Copy the channel map into the new-data struct; a NULL 'map' clears the
 * channel_map_is_set flag. Mirrors pa_sink_new_data_set_sample_spec(). */
102 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
105 if ((data->channel_map_is_set = !!map))
106 data->channel_map = *map;
/* Record an alternate sample rate for the sink-to-be and mark it as set.
 * Unlike the spec/map setters there is no "unset" path here. */
109 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
112 data->alternate_sample_rate_is_set = true;
113 data->alternate_sample_rate = alternate_sample_rate;
/* Copy the initial volume into the new-data struct; a NULL 'volume' clears
 * the volume_is_set flag. */
116 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
119 if ((data->volume_is_set = !!volume))
120 data->volume = *volume;
/* Record the initial mute state and mark it as explicitly set.
 * (The assignment of data->muted itself is elided in this excerpt.) */
123 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
126 data->muted_is_set = true;
/* Select the initial active port by name: free any previously stored port
 * name and store a fresh copy of 'port'. */
130 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
133 pa_xfree(data->active_port);
134 data->active_port = pa_xstrdup(port);
/* Release everything owned by a pa_sink_new_data struct: the property list,
 * the ports hashmap (which unrefs the ports), the duplicated name, and the
 * duplicated active-port name. Counterpart of pa_sink_new_data_init(). */
137 void pa_sink_new_data_done(pa_sink_new_data *data) {
140 pa_proplist_free(data->proplist);
143 pa_hashmap_free(data->ports);
145 pa_xfree(data->name);
146 pa_xfree(data->active_port);
149 /* Called from main context */
/* Clear all implementor-supplied callback pointers on the sink so that a
 * freshly created (or reset) sink has no hardware hooks until the driver
 * installs them via the pa_sink_set_*_callback() setters below. */
150 static void reset_callbacks(pa_sink *s) {
154 s->get_volume = NULL;
155 s->set_volume = NULL;
156 s->write_volume = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
162 s->get_formats = NULL;
163 s->set_formats = NULL;
164 s->update_rate = NULL;
167 /* Called from main context */
/* Create and register a new sink from the fields accumulated in 'data'.
 *
 * Overview of the visible steps (NOTE(review): some statements are elided
 * in this excerpt; confirm against the full file):
 *   1. Register the sink name and fire the SINK_NEW hook (modules may veto).
 *   2. Validate driver/name UTF-8, sample spec, channel map, and volume.
 *   3. Fill in proplist metadata, pick a default port, fire SINK_FIXATE.
 *   4. Initialize the pa_sink object: state, volumes, ports, latency
 *      bounds, and the thread_info mirror used by the IO thread.
 *   5. Create the companion monitor source ("<name>.monitor").
 * Returns the new sink, or NULL on validation/hook failure. */
168 pa_sink* pa_sink_new(
170 pa_sink_new_data *data,
171 pa_sink_flags_t flags) {
175 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
176 pa_source_new_data source_data;
182 pa_assert(data->name);
183 pa_assert_ctl_context();
185 s = pa_msgobject_new(pa_sink);
/* Reserve the sink name; failure means the name is already taken. */
187 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
188 pa_log_debug("Failed to register name %s.", data->name);
193 pa_sink_new_data_set_name(data, name);
/* Let modules inspect/modify the new-data; a negative return vetoes. */
195 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
197 pa_namereg_unregister(core, name);
201 /* FIXME, need to free s here on failure */
/* Validate caller-supplied strings and audio parameters. */
203 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
204 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
206 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
/* Derive a default channel map when none was provided. */
208 if (!data->channel_map_is_set)
209 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
211 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
212 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
214 /* FIXME: There should probably be a general function for checking whether
215 * the sink volume is allowed to be set, like there is for sink inputs. */
216 pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* Default to full (norm) volume when the caller did not set one. */
218 if (!data->volume_is_set) {
219 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
220 data->save_volume = false;
223 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
224 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
226 if (!data->muted_is_set)
/* Inherit card properties and fill in standard device metadata. */
230 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
232 pa_device_init_description(data->proplist, data->card);
233 pa_device_init_icon(data->proplist, true);
234 pa_device_init_intended_roles(data->proplist);
/* No port chosen yet: pick the best-priority one from the port map. */
236 if (!data->active_port) {
237 pa_device_port *p = pa_device_port_find_best(data->ports);
239 pa_sink_new_data_set_port(data, p->name);
/* Final chance for modules to adjust the data before it is fixated. */
242 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
244 pa_namereg_unregister(core, name);
/* --- From here on, copy the fixated data into the sink object. --- */
248 s->parent.parent.free = sink_free;
249 s->parent.process_msg = pa_sink_process_msg;
252 s->state = PA_SINK_INIT;
255 s->suspend_cause = data->suspend_cause;
256 pa_sink_set_mixer_dirty(s, false);
257 s->name = pa_xstrdup(name);
258 s->proplist = pa_proplist_copy(data->proplist);
259 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
260 s->module = data->module;
261 s->card = data->card;
263 s->priority = pa_device_init_priority(s->proplist);
265 s->sample_spec = data->sample_spec;
266 s->channel_map = data->channel_map;
267 s->default_sample_rate = s->sample_spec.rate;
/* Fall back to the core-wide alternate rate when none was given. */
269 if (data->alternate_sample_rate_is_set)
270 s->alternate_sample_rate = data->alternate_sample_rate;
272 s->alternate_sample_rate = s->core->alternate_sample_rate;
/* An alternate rate equal to the default is useless: disable it. */
274 if (s->sample_spec.rate == s->alternate_sample_rate) {
275 pa_log_warn("Default and alternate sample rates are the same.");
276 s->alternate_sample_rate = 0;
279 s->inputs = pa_idxset_new(NULL, NULL);
281 s->input_to_master = NULL;
283 s->reference_volume = s->real_volume = data->volume;
284 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
285 s->base_volume = PA_VOLUME_NORM;
286 s->n_volume_steps = PA_VOLUME_NORM+1;
287 s->muted = data->muted;
288 s->refresh_volume = s->refresh_muted = false;
295 /* As a minor optimization we just steal the list instead of
297 s->ports = data->ports;
300 s->active_port = NULL;
301 s->save_port = false;
/* Resolve the fixated port name against the (now stolen) port map. */
303 if (data->active_port)
304 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
305 s->save_port = data->save_port;
307 /* Hopefully the active port has already been assigned in the previous call
308 to pa_device_port_find_best, but better safe than sorry */
310 s->active_port = pa_device_port_find_best(s->ports);
313 s->port_latency_offset = s->active_port->latency_offset;
315 s->port_latency_offset = 0;
317 s->save_volume = data->save_volume;
318 s->save_muted = data->save_muted;
320 pa_silence_memchunk_get(
321 &core->silence_cache,
/* --- thread_info: the IO-thread-side mirror of the fields above. --- */
327 s->thread_info.rtpoll = NULL;
328 s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
329 (pa_free_cb_t) pa_sink_input_unref);
330 s->thread_info.soft_volume = s->soft_volume;
331 s->thread_info.soft_muted = s->muted;
332 s->thread_info.state = s->state;
333 s->thread_info.rewind_nbytes = 0;
334 s->thread_info.rewind_requested = false;
335 s->thread_info.max_rewind = 0;
336 s->thread_info.max_request = 0;
337 s->thread_info.requested_latency_valid = false;
338 s->thread_info.requested_latency = 0;
339 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
340 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
341 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
343 PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
344 s->thread_info.volume_changes_tail = NULL;
345 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
346 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
347 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
348 s->thread_info.port_latency_offset = s->port_latency_offset;
350 /* FIXME: This should probably be moved to pa_sink_put() */
351 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
354 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
356 pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
357 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
360 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
361 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* --- Create the monitor source that records this sink's output. --- */
365 pa_source_new_data_init(&source_data);
366 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
367 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
368 pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
369 source_data.name = pa_sprintf_malloc("%s.monitor", name);
370 source_data.driver = data->driver;
371 source_data.module = data->module;
372 source_data.card = data->card;
374 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
375 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
376 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
/* The monitor inherits the sink's latency capability flags. */
378 s->monitor_source = pa_source_new(core, &source_data,
379 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
380 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
382 pa_source_new_data_done(&source_data);
384 if (!s->monitor_source) {
390 s->monitor_source->monitor_of = s;
/* Keep the monitor's latency configuration in sync with the sink's. */
392 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
393 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
394 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
399 /* Called from main context */
/* Transition the sink to 'state'. No-op when already there. The change is
 * applied via the implementor's set_state() callback and/or a synchronous
 * SET_STATE message to the IO thread; on message failure the callback is
 * rolled back to the original state. Afterwards, listeners are notified
 * (hook + subscription event) unless we are entering UNLINKED, and on a
 * suspend/resume edge each sink input is told via its suspend() callback
 * (inputs flagged KILL_ON_SUSPEND are killed instead). The monitor source
 * is kept in sync. Returns 0 on success, negative on failure. */
400 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
403 pa_sink_state_t original_state;
406 pa_assert_ctl_context();
408 if (s->state == state)
411 original_state = s->state;
/* A "suspend change" is any edge between SUSPENDED and an opened state. */
414 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
415 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
418 if ((ret = s->set_state(s, state)) < 0)
422 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
/* IO thread rejected the change: undo the implementor-side switch. */
425 s->set_state(s, original_state);
432 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
433 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
434 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
437 if (suspend_change) {
441 /* We're suspending or resuming, tell everyone about it */
443 PA_IDXSET_FOREACH(i, s->inputs, idx)
444 if (s->state == PA_SINK_SUSPENDED &&
445 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
446 pa_sink_input_kill(i);
448 i->suspend(i, state == PA_SINK_SUSPENDED);
450 if (s->monitor_source)
451 pa_source_sync_suspend(s->monitor_source);
/* Install the implementor's get_volume callback (body elided in this
 * excerpt; presumably just stores 'cb' — confirm against the full file). */
457 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Install (or clear) the implementor's set_volume callback and keep the
 * HW_VOLUME_CTRL flag in sync with whether a callback is present. The
 * write_volume callback requires set_volume, hence the assert. If the
 * flags changed after initialization, subscribers are notified. */
463 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
464 pa_sink_flags_t flags;
467 pa_assert(!s->write_volume || cb);
471 /* Save the current flags so we can tell if they've changed */
475 /* The sink implementor is responsible for setting decibel volume support */
476 s->flags |= PA_SINK_HW_VOLUME_CTRL;
478 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
479 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
480 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
483 /* If the flags have changed after init, let any clients know via a change event */
484 if (s->state != PA_SINK_INIT && flags != s->flags)
485 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install (or clear) the write_volume callback used for deferred (timed)
 * hardware volume writes, toggling the DEFERRED_VOLUME flag accordingly.
 * write_volume only makes sense alongside set_volume (asserted). Flag
 * changes after init are broadcast to subscribers. */
488 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
489 pa_sink_flags_t flags;
492 pa_assert(!cb || s->set_volume);
494 s->write_volume = cb;
496 /* Save the current flags so we can tell if they've changed */
500 s->flags |= PA_SINK_DEFERRED_VOLUME;
502 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
504 /* If the flags have changed after init, let any clients know via a change event */
505 if (s->state != PA_SINK_INIT && flags != s->flags)
506 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install the implementor's get_mute callback (body elided in this
 * excerpt; presumably just stores 'cb' — confirm against the full file). */
509 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
/* Install (or clear) the implementor's set_mute callback and keep the
 * HW_MUTE_CTRL flag in sync. Flag changes after init are broadcast. */
515 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
516 pa_sink_flags_t flags;
522 /* Save the current flags so we can tell if they've changed */
526 s->flags |= PA_SINK_HW_MUTE_CTRL;
528 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
530 /* If the flags have changed after init, let any clients know via a change event */
531 if (s->state != PA_SINK_INIT && flags != s->flags)
532 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Toggle the FLAT_VOLUME flag on the sink. The request is gated by the
 * core-wide 'flat_volumes' user preference — enabling only takes effect
 * when the user allows flat volumes globally. Flag changes after init are
 * broadcast to subscribers. */
535 static void enable_flat_volume(pa_sink *s, bool enable) {
536 pa_sink_flags_t flags;
540 /* Always follow the overall user preference here */
541 enable = enable && s->core->flat_volumes;
543 /* Save the current flags so we can tell if they've changed */
547 s->flags |= PA_SINK_FLAT_VOLUME;
549 s->flags &= ~PA_SINK_FLAT_VOLUME;
551 /* If the flags have changed after init, let any clients know via a change event */
552 if (s->state != PA_SINK_INIT && flags != s->flags)
553 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Toggle the DECIBEL_VOLUME flag. Decibel volume implies (an attempt at)
 * flat volume, so enable_flat_volume() is driven in lockstep. Flag changes
 * after init are broadcast to subscribers. */
556 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
557 pa_sink_flags_t flags;
561 /* Save the current flags so we can tell if they've changed */
565 s->flags |= PA_SINK_DECIBEL_VOLUME;
566 enable_flat_volume(s, true);
568 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
569 enable_flat_volume(s, false);
572 /* If the flags have changed after init, let any clients know via a change event */
573 if (s->state != PA_SINK_INIT && flags != s->flags)
574 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
577 /* Called from main context */
/* Finish sink construction and make it live. Must be called exactly once
 * after pa_sink_new() and the callback setters, while state is still INIT.
 * Validates flag/callback consistency, enables software decibel volume
 * where applicable, inherits volumes from the master sink for
 * volume-sharing filter sinks, syncs thread_info, moves the sink to
 * SUSPENDED or IDLE, publishes the monitor source, and finally announces
 * the sink (subscription event + SINK_PUT hook + default-sink update). */
578 void pa_sink_put(pa_sink* s) {
579 pa_sink_assert_ref(s);
580 pa_assert_ctl_context();
582 pa_assert(s->state == PA_SINK_INIT);
583 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
585 /* The following fields must be initialized properly when calling _put() */
586 pa_assert(s->asyncmsgq);
587 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
589 /* Generally, flags should be initialized via pa_sink_new(). As a
590 * special exception we allow some volume related flags to be set
591 * between _new() and _put() by the callback setter functions above.
593 * Thus we implement a couple safeguards here which ensure the above
594 * setters were used (or at least the implementor made manual changes
595 * in a compatible way).
597 * Note: All of these flags set here can change over the life time
599 pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
600 pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
601 pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
603 /* XXX: Currently decibel volume is disabled for all sinks that use volume
604 * sharing. When the master sink supports decibel volume, it would be good
605 * to have the flag also in the filter sink, but currently we don't do that
606 * so that the flags of the filter sink never change when it's moved from
607 * a master sink to another. One solution for this problem would be to
608 * remove user-visible volume altogether from filter sinks when volume
609 * sharing is used, but the current approach was easier to implement... */
610 /* We always support decibel volumes in software, otherwise we leave it to
611 * the sink implementor to set this flag as needed.
613 * Note: This flag can also change over the life time of the sink. */
614 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
615 pa_sink_enable_decibel_volume(s, true);
616 s->soft_volume = s->reference_volume;
619 /* If the sink implementor support DB volumes by itself, we should always
620 * try and enable flat volumes too */
621 if ((s->flags & PA_SINK_DECIBEL_VOLUME))
622 enable_flat_volume(s, true);
/* Volume-sharing filter sinks track their master's volumes, remapped to
 * this sink's channel map. */
624 if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
625 pa_sink *root_sink = pa_sink_get_master(s);
627 pa_assert(root_sink);
629 s->reference_volume = root_sink->reference_volume;
630 pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
632 s->real_volume = root_sink->real_volume;
633 pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
635 /* We assume that if the sink implementor changed the default
636 * volume he did so in real_volume, because that is the usual
637 * place where he is supposed to place his changes. */
638 s->reference_volume = s->real_volume;
640 s->thread_info.soft_volume = s->soft_volume;
641 s->thread_info.soft_muted = s->muted;
642 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
/* Sanity-check the final flag/latency configuration against the monitor. */
644 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
645 || (s->base_volume == PA_VOLUME_NORM
646 && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
647 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
648 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
649 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
650 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
652 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
653 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
654 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
/* Enter the initial runtime state: SUSPENDED if a cause is pending. */
656 if (s->suspend_cause)
657 pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED) == 0);
659 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
661 pa_source_put(s->monitor_source);
663 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
664 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
666 /* This function must be called after the PA_CORE_HOOK_SINK_PUT hook,
667 * because module-switch-on-connect needs to know the old default sink */
668 pa_core_update_default_sink(s->core);
671 /* Called from main context */
/* Disconnect the sink from the core. Idempotent via 'unlink_requested'.
 * Fires the SINK_UNLINK hook, unregisters the name, removes the sink from
 * the core (and card) index sets, refreshes the default sink, kills all
 * remaining inputs, transitions to UNLINKED, unlinks the monitor source,
 * and finally posts the REMOVE event and the SINK_UNLINK_POST hook. */
672 void pa_sink_unlink(pa_sink* s) {
674 pa_sink_input *i, PA_UNUSED *j = NULL;
676 pa_sink_assert_ref(s);
677 pa_assert_ctl_context();
679 /* Please note that pa_sink_unlink() does more than simply
680 * reversing pa_sink_put(). It also undoes the registrations
681 * already done in pa_sink_new()! */
683 if (s->unlink_requested)
686 s->unlink_requested = true;
688 linked = PA_SINK_IS_LINKED(s->state);
691 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
693 if (s->state != PA_SINK_UNLINKED)
694 pa_namereg_unregister(s->core, s->name);
695 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
697 pa_core_update_default_sink(s->core);
700 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill every input still connected; each kill removes it from s->inputs. */
702 while ((i = pa_idxset_first(s->inputs, NULL))) {
704 pa_sink_input_kill(i);
709 sink_set_state(s, PA_SINK_UNLINKED);
711 s->state = PA_SINK_UNLINKED;
715 if (s->monitor_source)
716 pa_source_unlink(s->monitor_source);
719 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
720 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
724 /* Called from main context */
/* Destructor installed in s->parent.parent.free by pa_sink_new(). Runs
 * only once the refcount hits zero and the sink is already unlinked.
 * Releases pending volume changes, the monitor source reference, the
 * input containers, the cached silence memblock, the proplist, and the
 * ports hashmap (which unrefs the ports). */
725 static void sink_free(pa_object *o) {
726 pa_sink *s = PA_SINK(o);
729 pa_assert_ctl_context();
730 pa_assert(pa_sink_refcnt(s) == 0);
731 pa_assert(!PA_SINK_IS_LINKED(s->state));
733 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
735 pa_sink_volume_change_flush(s);
737 if (s->monitor_source) {
738 pa_source_unref(s->monitor_source);
739 s->monitor_source = NULL;
742 pa_idxset_free(s->inputs, NULL);
743 pa_hashmap_free(s->thread_info.inputs);
745 if (s->silence.memblock)
746 pa_memblock_unref(s->silence.memblock);
752 pa_proplist_free(s->proplist);
755 pa_hashmap_free(s->ports);
760 /* Called from main context, and not while the IO thread is active, please */
/* Attach the message queue used to talk to the IO thread, propagating the
 * same queue to the monitor source. (The s->asyncmsgq assignment itself is
 * elided in this excerpt.) */
761 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
762 pa_sink_assert_ref(s);
763 pa_assert_ctl_context();
767 if (s->monitor_source)
768 pa_source_set_asyncmsgq(s->monitor_source, q);
771 /* Called from main context, and not while the IO thread is active, please */
/* Update the bits of s->flags selected by 'mask' to the corresponding bits
 * of 'value'. Only LATENCY and DYNAMIC_LATENCY may be changed this way
 * (asserted). Logs each flag edge, notifies subscribers and the
 * FLAGS_CHANGED hook, mirrors the change onto the monitor source, and
 * recurses into any filter sinks layered on top of this one. */
772 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
773 pa_sink_flags_t old_flags;
774 pa_sink_input *input;
777 pa_sink_assert_ref(s);
778 pa_assert_ctl_context();
780 /* For now, allow only a minimal set of flags to be changed. */
781 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
783 old_flags = s->flags;
784 s->flags = (s->flags & ~mask) | (value & mask);
786 if (s->flags == old_flags)
789 if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
790 pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
792 if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
793 pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
794 s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
796 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
797 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
/* Translate the sink flag bits into the equivalent source flag bits. */
799 if (s->monitor_source)
800 pa_source_update_flags(s->monitor_source,
801 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
802 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
803 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
804 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
/* Propagate to filter sinks whose origin is one of our inputs. */
806 PA_IDXSET_FOREACH(input, s->inputs, idx) {
807 if (input->origin_sink)
808 pa_sink_update_flags(input->origin_sink, mask, value);
812 /* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object the IO thread runs on, and propagate it to the
 * monitor source so both poll from the same loop. */
813 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
814 pa_sink_assert_ref(s);
815 pa_sink_assert_io_context(s);
817 s->thread_info.rtpoll = p;
819 if (s->monitor_source)
820 pa_source_set_rtpoll(s->monitor_source, p);
823 /* Called from main context */
/* Re-evaluate whether the sink should be RUNNING or IDLE based on whether
 * anything is using it (pa_sink_used_by). Suspended sinks are left alone.
 * Returns the result of sink_set_state() (0 on success). */
824 int pa_sink_update_status(pa_sink*s) {
825 pa_sink_assert_ref(s);
826 pa_assert_ctl_context();
827 pa_assert(PA_SINK_IS_LINKED(s->state));
829 if (s->state == PA_SINK_SUSPENDED)
832 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
835 /* Called from any context - must be threadsafe */
/* Atomically mark whether the hardware mixer settings need to be re-applied
 * once the mixer becomes accessible again (see pa_sink_suspend()). */
836 void pa_sink_set_mixer_dirty(pa_sink *s, bool is_dirty) {
837 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
840 /* Called from main context */
/* Add or remove 'cause' from the sink's (and its monitor's) suspend-cause
 * bitmask, then move the sink to the matching state: SUSPENDED while any
 * cause remains, otherwise RUNNING/IDLE depending on use. When leaving a
 * session-suspend with a dirty mixer, the active port is re-applied first
 * (via an IO-thread message when DEFERRED_VOLUME is in effect, otherwise
 * directly). Returns the result of sink_set_state(), or 0 when the state
 * already matches. */
841 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
842 pa_sink_assert_ref(s);
843 pa_assert_ctl_context();
844 pa_assert(PA_SINK_IS_LINKED(s->state));
845 pa_assert(cause != 0);
848 s->suspend_cause |= cause;
849 s->monitor_source->suspend_cause |= cause;
851 s->suspend_cause &= ~cause;
852 s->monitor_source->suspend_cause &= ~cause;
855 if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
856 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
857 it'll be handled just fine. */
858 pa_sink_set_mixer_dirty(s, false);
859 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
860 if (s->active_port && s->set_port) {
861 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
862 struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
863 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
866 s->set_port(s, s->active_port);
/* Nothing to do if the current state already reflects the cause mask. */
876 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
879 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
881 if (s->suspend_cause)
882 return sink_set_state(s, PA_SINK_SUSPENDED);
884 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
887 /* Called from main context */
/* Begin moving every input away from this sink: each input whose
 * start_move succeeds is (presumably) queued onto 'q' — the enqueue line
 * is elided in this excerpt; confirm against the full file. Each queued
 * input holds an extra reference; inputs that fail to start moving are
 * unreffed immediately. Returns the queue for pa_sink_move_all_finish(). */
888 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
889 pa_sink_input *i, *n;
892 pa_sink_assert_ref(s);
893 pa_assert_ctl_context();
894 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the successor up front: start_move removes 'i' from s->inputs. */
899 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
900 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
902 pa_sink_input_ref(i);
904 if (pa_sink_input_start_move(i) >= 0)
907 pa_sink_input_unref(i);
913 /* Called from main context */
/* Complete a move started by pa_sink_move_all_start(): re-attach every
 * queued input to sink 's' ('save' marks the routing as user-chosen);
 * inputs that cannot be attached are failed. Drops the per-input reference
 * taken at start and frees the queue. */
914 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
917 pa_sink_assert_ref(s);
918 pa_assert_ctl_context();
919 pa_assert(PA_SINK_IS_LINKED(s->state));
922 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
923 if (pa_sink_input_finish_move(i, s, save) < 0)
924 pa_sink_input_fail_move(i);
926 pa_sink_input_unref(i);
929 pa_queue_free(q, NULL);
932 /* Called from main context */
/* Abort a move started by pa_sink_move_all_start(): fail every queued
 * input, drop its reference, and free the queue. */
933 void pa_sink_move_all_fail(pa_queue *q) {
936 pa_assert_ctl_context();
939 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
940 pa_sink_input_fail_move(i);
941 pa_sink_input_unref(i);
944 pa_queue_free(q, NULL);
947 /* Called from IO thread context */
/* Scan this sink's inputs for underruns. 'left_to_play' is how many bytes
 * are still queued in the playback buffer (in this sink's sample spec).
 * For filter inputs the check recurses into the origin sink, converting
 * byte counts between the two sample-spec domains in both directions.
 * 'result' ends up as the longest underrun seen; the function returns
 * left_to_play minus that, i.e. how much truly playable data remains. */
948 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
953 pa_sink_assert_ref(s);
954 pa_sink_assert_io_context(s);
956 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
957 size_t uf = i->thread_info.underrun_for_sink;
959 /* Propagate down the filter tree */
960 if (i->origin_sink) {
961 size_t filter_result, left_to_play_origin;
963 /* The recursive call works in the origin sink domain ... */
964 left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
966 /* .. and returns the time to sleep before waking up. We need the
967 * underrun duration for comparisons, so we undo the subtraction on
968 * the return value... */
969 filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
971 /* ... and convert it back to the master sink domain */
972 filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
974 /* Remember the longest underrun so far */
975 if (filter_result > result)
976 result = filter_result;
980 /* No underrun here, move on */
982 } else if (uf >= left_to_play) {
983 /* The sink has possibly consumed all the data the sink input provided */
984 pa_sink_input_process_underrun(i);
985 } else if (uf > result) {
986 /* Remember the longest underrun so far */
992 pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
993 (long) result, (long) left_to_play - result);
994 return left_to_play - result;
997 /* Called from IO thread context */
/* Execute a rewind of 'nbytes' bytes in the playback stream. Skipped
 * entirely when no rewind was requested and nbytes is zero. Clears the
 * pending-rewind bookkeeping, rewinds the deferred hardware volume
 * timeline when DEFERRED_VOLUME is active, forwards the rewind to every
 * attached input, and finally to the monitor source if it is linked. */
998 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1002 pa_sink_assert_ref(s);
1003 pa_sink_assert_io_context(s);
1004 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1006 /* If nobody requested this and this is actually no real rewind
1007 * then we can short cut this. Please note that this means that
1008 * not all rewind requests triggered upstream will always be
1009 * translated in actual requests! */
1010 if (!s->thread_info.rewind_requested && nbytes <= 0)
1013 s->thread_info.rewind_nbytes = 0;
1014 s->thread_info.rewind_requested = false;
1017 pa_log_debug("Processing rewind...");
1018 if (s->flags & PA_SINK_DEFERRED_VOLUME)
1019 pa_sink_volume_change_rewind(s, nbytes);
1022 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1023 pa_sink_input_assert_ref(i);
1024 pa_sink_input_process_rewind(i, nbytes);
1028 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1029 pa_source_process_rewind(s->monitor_source, nbytes);
1033 /* Called from IO thread context */
/* Collect up to 'maxinfo' chunks of audio from the attached inputs into
 * the 'info' array for mixing. Peeks each input for *length bytes; pure
 * silence chunks are dropped (and unreffed) so they don't participate in
 * the mix. *length is shrunk to the shortest non-silent chunk seen
 * (mixlength). Each kept entry holds a reference to its input in
 * 'userdata'. Returns the number of entries filled (return statement
 * elided in this excerpt). */
1034 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1038 size_t mixlength = *length;
1040 pa_sink_assert_ref(s);
1041 pa_sink_assert_io_context(s);
1044 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1045 pa_sink_input_assert_ref(i);
1047 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
1049 if (mixlength == 0 || info->chunk.length < mixlength)
1050 mixlength = info->chunk.length;
1052 if (pa_memblock_is_silence(info->chunk.memblock)) {
1053 pa_memblock_unref(info->chunk.memblock);
1057 info->userdata = pa_sink_input_ref(i);
1059 pa_assert(info->chunk.memblock);
1060 pa_assert(info->chunk.length > 0);
1068 *length = mixlength;
1073 /* Called from IO thread context */
/* After a render pass, consume result->length bytes from every input and
 * feed the rendered audio to the monitor source. For each input: find its
 * matching pa_mix_info entry (if any), drop the read data, and — when the
 * monitor is linked and the input has direct outputs — post the input's
 * own (volume-adjusted) chunk to those outputs instead of the mixed
 * result. Matched entries have their chunk and input reference released
 * here; leftover entries (inputs that disappeared mid-render) are cleaned
 * up in the final loop. Finally the mixed result is posted to the monitor. */
1074 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1078 unsigned n_unreffed = 0;
1080 pa_sink_assert_ref(s);
1081 pa_sink_assert_io_context(s);
1083 pa_assert(result->memblock);
1084 pa_assert(result->length > 0);
1086 /* We optimize for the case where the order of the inputs has not changed */
1088 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1090 pa_mix_info* m = NULL;
1092 pa_sink_input_assert_ref(i);
1094 /* Let's try to find the matching entry info the pa_mix_info array */
1095 for (j = 0; j < n; j ++) {
1097 if (info[p].userdata == i) {
1107 /* Drop read data */
1108 pa_sink_input_drop(i, result->length);
1110 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
1112 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1113 void *ostate = NULL;
1114 pa_source_output *o;
/* Use the input's own chunk (with its volume applied) when we
 * have one; otherwise fall back to the chunk 'c' set up above
 * (setup elided in this excerpt). */
1117 if (m && m->chunk.memblock) {
1119 pa_memblock_ref(c.memblock);
1120 pa_assert(result->length <= c.length);
1121 c.length = result->length;
1123 pa_memchunk_make_writable(&c, 0);
1124 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
1127 pa_memblock_ref(c.memblock);
1128 pa_assert(result->length <= c.length);
1129 c.length = result->length;
/* Hand the per-input chunk to every direct output of this input. */
1132 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1133 pa_source_output_assert_ref(o);
1134 pa_assert(o->direct_on_input == i);
1135 pa_source_post_direct(s->monitor_source, o, &c);
1138 pa_memblock_unref(c.memblock);
/* Release the matched mix entry: its chunk and its input ref. */
1143 if (m->chunk.memblock) {
1144 pa_memblock_unref(m->chunk.memblock);
1145 pa_memchunk_reset(&m->chunk);
1148 pa_sink_input_unref(m->userdata);
1155 /* Now drop references to entries that are included in the
1156 * pa_mix_info array but don't exist anymore */
1158 if (n_unreffed < n) {
1159 for (; n > 0; info++, n--) {
1161 pa_sink_input_unref(info->userdata);
1162 if (info->chunk.memblock)
1163 pa_memblock_unref(info->chunk.memblock);
/* Feed the fully mixed block to the monitor source. */
1167 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1168 pa_source_post(s->monitor_source, result);
1171 /* Called from IO thread context */
/* Render up to 'length' bytes of mixed audio from all sink inputs into
 * *result (a freshly referenced memchunk the caller must unref).
 * Fast paths: a suspended sink returns silence; zero inputs return the
 * cached silence block; exactly one input may be passed through with
 * only a volume adjustment; otherwise pa_mix() combines all inputs.
 * NOTE(review): elided view — some lines (e.g. the n == 0 branch header,
 * 'n' and 'volume' declarations) are not shown. */
1172 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1173 pa_mix_info info[MAX_MIX_CHANNELS];
1175 size_t block_size_max;
1177 pa_sink_assert_ref(s);
1178 pa_sink_assert_io_context(s);
1179 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1180 pa_assert(pa_frame_aligned(length, &s->sample_spec));
/* Rendering must never run while a rewind is pending. */
1183 pa_assert(!s->thread_info.rewind_requested);
1184 pa_assert(s->thread_info.rewind_nbytes == 0);
1186 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1187 result->memblock = pa_memblock_ref(s->silence.memblock);
1188 result->index = s->silence.index;
1189 result->length = PA_MIN(s->silence.length, length);
/* length == 0 means "pick a default": one frame-aligned mix buffer. */
1196 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
/* Never request more than the memory pool can hand out in one block. */
1198 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1199 if (length > block_size_max)
1200 length = pa_frame_align(block_size_max, &s->sample_spec);
1202 pa_assert(length > 0);
1204 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* No inputs: hand out (a ref of) the silence chunk. */
1208 *result = s->silence;
1209 pa_memblock_ref(result->memblock);
1211 if (result->length > length)
1212 result->length = length;
1214 } else if (n == 1) {
/* Single input: reuse its chunk directly and apply volume in place. */
1217 *result = info[0].chunk;
1218 pa_memblock_ref(result->memblock);
1220 if (result->length > length)
1221 result->length = length;
1223 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1225 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1226 pa_memblock_unref(result->memblock);
1227 pa_silence_memchunk_get(&s->core->silence_cache,
1232 } else if (!pa_cvolume_is_norm(&volume)) {
/* Copy-on-write before scaling, so the input's block stays intact. */
1233 pa_memchunk_make_writable(result, 0);
1234 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* Multiple inputs: allocate a fresh block and mix everything into it. */
1238 result->memblock = pa_memblock_new(s->core->mempool, length);
1240 ptr = pa_memblock_acquire(result->memblock);
1241 result->length = pa_mix(info, n,
1244 &s->thread_info.soft_volume,
1245 s->thread_info.soft_muted);
1246 pa_memblock_release(result->memblock);
/* Consume the rendered data from all inputs and drop the info[] refs. */
1251 inputs_drop(s, info, n, result);
1256 /* Called from IO thread context */
/* Like pa_sink_render(), but mixes directly into the caller-supplied
 * memchunk *target (possibly shrinking target->length). Suspended sinks
 * write silence; zero inputs write silence; one input is volume-scaled
 * and copied; multiple inputs are mixed in place via pa_mix().
 * NOTE(review): elided view — declarations of 'n', 'ptr', 'volume',
 * 'vchunk' and several braces are not shown. */
1257 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1258 pa_mix_info info[MAX_MIX_CHANNELS];
1260 size_t length, block_size_max;
1262 pa_sink_assert_ref(s);
1263 pa_sink_assert_io_context(s);
1264 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1266 pa_assert(target->memblock);
1267 pa_assert(target->length > 0);
1268 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1270 pa_assert(!s->thread_info.rewind_requested);
1271 pa_assert(s->thread_info.rewind_nbytes == 0);
1273 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1274 pa_silence_memchunk(target, &s->sample_spec);
/* Clamp the requested length to the pool's maximum block size. */
1280 length = target->length;
1281 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1282 if (length > block_size_max)
1283 length = pa_frame_align(block_size_max, &s->sample_spec);
1285 pa_assert(length > 0);
1287 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* No inputs: silence the target. */
1290 if (target->length > length)
1291 target->length = length;
1293 pa_silence_memchunk(target, &s->sample_spec);
1294 } else if (n == 1) {
/* Single input: scale its chunk by the combined volume, then copy. */
1297 if (target->length > length)
1298 target->length = length;
1300 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1302 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1303 pa_silence_memchunk(target, &s->sample_spec);
1307 vchunk = info[0].chunk;
1308 pa_memblock_ref(vchunk.memblock);
1310 if (vchunk.length > length)
1311 vchunk.length = length;
1313 if (!pa_cvolume_is_norm(&volume)) {
/* Copy-on-write so the input's original block is left untouched. */
1314 pa_memchunk_make_writable(&vchunk, 0);
1315 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1318 pa_memchunk_memcpy(target, &vchunk);
1319 pa_memblock_unref(vchunk.memblock);
/* Multiple inputs: mix straight into the target buffer. */
1325 ptr = pa_memblock_acquire(target->memblock);
1327 target->length = pa_mix(info, n,
1328 (uint8_t*) ptr + target->index, length,
1330 &s->thread_info.soft_volume,
1331 s->thread_info.soft_muted);
1333 pa_memblock_release(target->memblock);
/* Consume the rendered data from all inputs and drop the info[] refs. */
1336 inputs_drop(s, info, n, target);
1341 /* Called from IO thread context */
/* Fill the target memchunk completely: repeatedly call
 * pa_sink_render_into() on the not-yet-filled remainder until
 * target->length bytes are rendered (the loop itself is elided in this
 * view — confirm against the full source). A suspended sink just writes
 * silence. */
1342 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1346 pa_sink_assert_ref(s);
1347 pa_sink_assert_io_context(s);
1348 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1350 pa_assert(target->memblock);
1351 pa_assert(target->length > 0);
1352 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1354 pa_assert(!s->thread_info.rewind_requested);
1355 pa_assert(s->thread_info.rewind_nbytes == 0);
1357 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1358 pa_silence_memchunk(target, &s->sample_spec);
/* 'chunk' (declared in elided lines) tracks the unfilled tail of target. */
1371 pa_sink_render_into(s, &chunk);
1380 /* Called from IO thread context */
/* Render exactly 'length' bytes into *result. First does a normal
 * pa_sink_render(); if that produced less than requested, the result is
 * made writable at full size and the remainder is rendered in place via
 * pa_sink_render_into_full(). Caller unrefs result->memblock. */
1381 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1382 pa_sink_assert_ref(s);
1383 pa_sink_assert_io_context(s);
1384 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1385 pa_assert(length > 0);
1386 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1389 pa_assert(!s->thread_info.rewind_requested);
1390 pa_assert(s->thread_info.rewind_nbytes == 0);
1394 pa_sink_render(s, length, result);
1396 if (result->length < length) {
/* Grow the chunk to the full requested size, then fill the tail. */
1399 pa_memchunk_make_writable(result, length);
1401 chunk.memblock = result->memblock;
1402 chunk.index = result->index + result->length;
1403 chunk.length = length - result->length;
1405 pa_sink_render_into_full(s, &chunk);
1407 result->length = length;
1413 /* Called from main thread */
/* Try to switch the sink's sample rate to match 'rate' (e.g. for a new
 * stream). Picks between the requested, default and alternate rates,
 * preferring whichever needs the least resampling effort; refuses while
 * the sink (or its monitor) is RUNNING. Suspends the sink around the
 * actual rate change and updates the monitor source and corked inputs
 * afterwards. Returns an int status (return statements elided in this
 * view — presumably 0/negative; confirm against full source). */
1414 int pa_sink_update_rate(pa_sink *s, uint32_t rate, bool passthrough) {
1416 uint32_t desired_rate;
1417 uint32_t default_rate = s->default_sample_rate;
1418 uint32_t alternate_rate = s->alternate_sample_rate;
1421 bool default_rate_is_usable = false;
1422 bool alternate_rate_is_usable = false;
1423 bool avoid_resampling = s->core->avoid_resampling;
1425 if (rate == s->sample_spec.rate)
/* Sink implementations without an update_rate() callback can't switch. */
1428 if (!s->update_rate)
1431 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1432 pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
1436 if (PA_SINK_IS_RUNNING(s->state)) {
1437 pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
1438 s->sample_spec.rate);
1442 if (s->monitor_source) {
1443 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1444 pa_log_info("Cannot update rate, monitor source is RUNNING");
1449 if (PA_UNLIKELY(!pa_sample_rate_valid(rate)))
/* Rate selection: passthrough must use the stream rate exactly. */
1453 /* We have to try to use the sink input rate */
1454 desired_rate = rate;
1456 } else if (avoid_resampling && (rate >= default_rate || rate >= alternate_rate)) {
1457 /* We just try to set the sink input's sample rate if it's not too low */
1458 desired_rate = rate;
1460 } else if (default_rate == rate || alternate_rate == rate) {
1461 /* We can directly try to use this rate */
1462 desired_rate = rate;
1465 /* See if we can pick a rate that results in less resampling effort */
1466 if (default_rate % 11025 == 0 && rate % 11025 == 0)
1467 default_rate_is_usable = true;
1468 if (default_rate % 4000 == 0 && rate % 4000 == 0)
1469 default_rate_is_usable = true;
1470 if (alternate_rate && alternate_rate % 11025 == 0 && rate % 11025 == 0)
1471 alternate_rate_is_usable = true;
1472 if (alternate_rate && alternate_rate % 4000 == 0 && rate % 4000 == 0)
1473 alternate_rate_is_usable = true;
1475 if (alternate_rate_is_usable && !default_rate_is_usable)
1476 desired_rate = alternate_rate;
1478 desired_rate = default_rate;
1481 if (desired_rate == s->sample_spec.rate)
/* Non-passthrough rate changes are rejected while streams are attached
 * (condition body elided in this view). */
1484 if (!passthrough && pa_sink_used_by(s) > 0)
1487 pa_log_debug("Suspending sink %s due to changing the sample rate.", s->name);
1488 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1490 if (s->update_rate(s, desired_rate) >= 0) {
1491 /* update monitor source as well */
1492 if (s->monitor_source && !passthrough)
1493 pa_source_update_rate(s->monitor_source, desired_rate, false);
1494 pa_log_info("Changed sampling rate successfully");
1496 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1497 if (i->state == PA_SINK_INPUT_CORKED)
1498 pa_sink_input_update_rate(i);
1504 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1509 /* Called from main thread */
/* Query the sink's current latency (in the sound card's time domain) by
 * sending a synchronous GET_LATENCY message to the IO thread, then
 * applying the port latency offset. Returns 0 when suspended or when
 * the sink does not report latency (PA_SINK_LATENCY unset). */
1510 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1513 pa_sink_assert_ref(s);
1514 pa_assert_ctl_context();
1515 pa_assert(PA_SINK_IS_LINKED(s->state));
1517 /* The returned value is supposed to be in the time domain of the sound card! */
1519 if (s->state == PA_SINK_SUSPENDED)
1522 if (!(s->flags & PA_SINK_LATENCY))
1525 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1527 /* the return value is unsigned, so check that the offset can be added to usec without
/* ... underflowing below zero; a too-negative offset is simply not applied. */
1529 if (-s->port_latency_offset <= usec)
1530 usec += s->port_latency_offset;
1534 return (pa_usec_t)usec;
1537 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): calls process_msg()
 * directly instead of a cross-thread message. May return a negative
 * latency only when allow_negative is true; otherwise negative results
 * are clamped (clamp return elided in this view). */
1538 int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
1542 pa_sink_assert_ref(s);
1543 pa_sink_assert_io_context(s);
1544 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1546 /* The returned value is supposed to be in the time domain of the sound card! */
1548 if (s->thread_info.state == PA_SINK_SUSPENDED)
1551 if (!(s->flags & PA_SINK_LATENCY))
1554 o = PA_MSGOBJECT(s);
1556 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1558 o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1560 /* If allow_negative is false, the call should only return positive values,
 * so negative totals are clamped to zero below. */
1561 usec += s->thread_info.port_latency_offset;
1562 if (!allow_negative && usec < 0)
1568 /* Called from the main thread (and also from the IO thread while the main
1569 * thread is waiting).
1571 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1572 * set. Instead, flat volume mode is detected by checking whether the root sink
1573 * has the flag set. */
/* Returns true iff the sink's root (master) sink operates in flat
 * volume mode. NOTE(review): handling of a NULL master (possible per
 * pa_sink_get_master()) is elided in this view — confirm. */
1574 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1575 pa_sink_assert_ref(s);
1577 s = pa_sink_get_master(s);
1580 return (s->flags & PA_SINK_FLAT_VOLUME);
1585 /* Called from the main thread (and also from the IO thread while the main
1586 * thread is waiting). */
/* Walk up the chain of volume-sharing filter sinks and return the root
 * sink. Can return NULL mid-walk when a filter sink's input_to_master
 * link is (temporarily) missing. */
1587 pa_sink *pa_sink_get_master(pa_sink *s) {
1588 pa_sink_assert_ref(s);
1590 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1591 if (PA_UNLIKELY(!s->input_to_master))
/* Follow the filter's own sink input to the next sink upstream. */
1594 s = s->input_to_master->sink;
1600 /* Called from main context */
/* A sink is a filter sink iff it is connected to a master sink via an
 * input_to_master sink input. */
1601 bool pa_sink_is_filter(pa_sink *s) {
1602 pa_sink_assert_ref(s);
1604 return (s->input_to_master != NULL);
1607 /* Called from main context */
/* True iff the sink currently carries a passthrough stream. Since at
 * most one passthrough input can be connected at a time, only the case
 * of exactly one input needs checking. */
1608 bool pa_sink_is_passthrough(pa_sink *s) {
1609 pa_sink_input *alt_i;
1612 pa_sink_assert_ref(s);
1614 /* one and only one PASSTHROUGH input can possibly be connected */
1615 if (pa_idxset_size(s->inputs) == 1) {
1616 alt_i = pa_idxset_first(s->inputs, &idx);
1618 if (pa_sink_input_is_passthrough(alt_i))
1625 /* Called from main context */
/* Prepare the sink for a passthrough (e.g. compressed-format) stream:
 * suspend the monitor source and force the volume to NORM (capped by
 * base_volume), remembering the previous volume so
 * pa_sink_leave_passthrough() can restore it. */
1626 void pa_sink_enter_passthrough(pa_sink *s) {
1629 /* disable the monitor in passthrough mode */
1630 if (s->monitor_source) {
1631 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1632 pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1635 /* set the volume to NORM */
1636 s->saved_volume = *pa_sink_get_volume(s, true);
1637 s->saved_save_volume = s->save_volume;
1639 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1640 pa_sink_set_volume(s, &volume, true, false);
1643 /* Called from main context */
/* Undo pa_sink_enter_passthrough(): resume the monitor source and
 * restore the volume (and its save flag) saved when passthrough mode
 * was entered. */
1644 void pa_sink_leave_passthrough(pa_sink *s) {
1645 /* Unsuspend monitor */
1646 if (s->monitor_source) {
1647 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1648 pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1651 /* Restore sink volume to what it was before we entered passthrough mode */
1652 pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
/* Clear the saved state now that it has been consumed. */
1654 pa_cvolume_init(&s->saved_volume);
1655 s->saved_save_volume = false;
1658 /* Called from main context. */
/* Recompute one input's reference ratio
 * (i->reference_ratio = i->volume / i->sink->reference_volume),
 * per channel, after remapping the sink volume into the input's channel
 * map. Only meaningful in flat volume mode. */
1659 static void compute_reference_ratio(pa_sink_input *i) {
1661 pa_cvolume remapped;
1665 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1668 * Calculates the reference ratio from the sink's reference
1669 * volume. This basically calculates:
1671 * i->reference_ratio = i->volume / i->sink->reference_volume
1674 remapped = i->sink->reference_volume;
1675 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1677 ratio = i->reference_ratio;
1679 for (c = 0; c < i->sample_spec.channels; c++) {
1681 /* We don't update when the sink volume is 0 anyway */
1682 if (remapped.values[c] <= PA_VOLUME_MUTED)
1685 /* Don't update the reference ratio unless necessary */
1686 if (pa_sw_volume_multiply(
1688 remapped.values[c]) == i->volume.values[c])
/* Only divide when the old ratio no longer reproduces i->volume,
 * avoiding needless rounding drift. */
1691 ratio.values[c] = pa_sw_volume_divide(
1692 i->volume.values[c],
1693 remapped.values[c]);
1696 pa_sink_input_set_reference_ratio(i, &ratio);
1699 /* Called from main context. Only called for the root sink in volume sharing
1700 * cases, except for internal recursive calls. */
/* Recompute reference ratios for every input of this sink, recursing
 * into filter sinks that share volume with their master. */
1701 static void compute_reference_ratios(pa_sink *s) {
1705 pa_sink_assert_ref(s);
1706 pa_assert_ctl_context();
1707 pa_assert(PA_SINK_IS_LINKED(s->state));
1708 pa_assert(pa_sink_flat_volume_enabled(s));
1710 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1711 compute_reference_ratio(i);
1713 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1714 compute_reference_ratios(i->origin_sink);
1718 /* Called from main context. Only called for the root sink in volume sharing
1719 * cases, except for internal recursive calls. */
/* Recompute, for every input of this sink:
 *   i->real_ratio  := i->volume / s->real_volume   (per channel)
 *   i->soft_volume := i->real_ratio * i->volume_factor
 * Inputs that belong to volume-sharing filter sinks get a 0 dB real
 * ratio and are recursed into instead. The thread_info copy of
 * soft_volume is deliberately NOT updated here (caller's job). */
1720 static void compute_real_ratios(pa_sink *s) {
1724 pa_sink_assert_ref(s);
1725 pa_assert_ctl_context();
1726 pa_assert(PA_SINK_IS_LINKED(s->state));
1727 pa_assert(pa_sink_flat_volume_enabled(s));
1729 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1731 pa_cvolume remapped;
1733 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1734 /* The origin sink uses volume sharing, so this input's real ratio
1735 * is handled as a special case - the real ratio must be 0 dB, and
1736 * as a result i->soft_volume must equal i->volume_factor. */
1737 pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1738 i->soft_volume = i->volume_factor;
1740 compute_real_ratios(i->origin_sink);
1746 * This basically calculates:
1748 * i->real_ratio := i->volume / s->real_volume
1749 * i->soft_volume := i->real_ratio * i->volume_factor
1752 remapped = s->real_volume;
1753 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1755 i->real_ratio.channels = i->sample_spec.channels;
1756 i->soft_volume.channels = i->sample_spec.channels;
1758 for (c = 0; c < i->sample_spec.channels; c++) {
1760 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1761 /* We leave i->real_ratio untouched */
1762 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1766 /* Don't lose accuracy unless necessary */
1767 if (pa_sw_volume_multiply(
1768 i->real_ratio.values[c],
1769 remapped.values[c]) != i->volume.values[c])
/* Old ratio is stale — recompute it by division. */
1771 i->real_ratio.values[c] = pa_sw_volume_divide(
1772 i->volume.values[c],
1773 remapped.values[c]);
1775 i->soft_volume.values[c] = pa_sw_volume_multiply(
1776 i->real_ratio.values[c],
1777 i->volume_factor.values[c]);
1780 /* We don't copy the soft_volume to the thread_info data
1781 * here. That must be done by the caller */
/* Remap volume *v from channel map 'from' to 'to' while minimizing the
 * impact on other streams: if 'template' (an existing sink-side volume)
 * already is a valid remapping of v, reuse it; if the maps differ and
 * no such template fits, fall back to an all-channel volume (the max of
 * v) so one stream's change can be compensated by others. Returns v. */
1785 static pa_cvolume *cvolume_remap_minimal_impact(
1787 const pa_cvolume *template,
1788 const pa_channel_map *from,
1789 const pa_channel_map *to) {
1794 pa_assert(template);
1797 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1798 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1800 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1801 * mapping from sink input to sink volumes:
1803 * If template is a possible remapping from v it is used instead
1804 * of remapping anew.
1806 * If the channel maps don't match we set an all-channel volume on
1807 * the sink to ensure that changing a volume on one stream has no
1808 * effect that cannot be compensated for in another stream that
1809 * does not have the same channel map as the sink. */
1811 if (pa_channel_map_equal(from, to))
/* Round-trip check: does remapping the template back to 'from' give v? */
1815 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1820 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1824 /* Called from main thread. Only called for the root sink in volume sharing
1825 * cases, except for internal recursive calls. */
/* Merge the (remapped) volumes of all inputs of this sink tree into
 * *max_volume, taking per-channel maxima. Inputs that feed
 * volume-sharing filter sinks are skipped themselves but their sinks
 * are recursed into. */
1826 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1830 pa_sink_assert_ref(s);
1831 pa_assert(max_volume);
1832 pa_assert(channel_map);
1833 pa_assert(pa_sink_flat_volume_enabled(s));
1835 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1836 pa_cvolume remapped;
1838 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1839 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1841 /* Ignore this input. The origin sink uses volume sharing, so this
1842 * input's volume will be set to be equal to the root sink's real
1843 * volume. Obviously this input's current volume must not then
1844 * affect what the root sink's real volume will be. */
1848 remapped = i->volume;
1849 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1850 pa_cvolume_merge(max_volume, max_volume, &remapped);
1854 /* Called from main thread. Only called for the root sink in volume sharing
1855 * cases, except for internal recursive calls. */
/* True iff the sink tree rooted at s has at least one "real" input,
 * i.e. one that is not merely the connection of an empty
 * volume-sharing filter sink. */
1856 static bool has_inputs(pa_sink *s) {
1860 pa_sink_assert_ref(s);
1862 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1863 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1870 /* Called from main thread. Only called for the root sink in volume sharing
1871 * cases, except for internal recursive calls. */
/* Set s->real_volume to *new_volume (remapped into s's channel map) and
 * propagate it down through volume-sharing filter sinks: in flat volume
 * mode their connecting inputs follow the root's real volume exactly
 * and get their reference ratio recomputed. */
1872 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1876 pa_sink_assert_ref(s);
1877 pa_assert(new_volume);
1878 pa_assert(channel_map);
1880 s->real_volume = *new_volume;
1881 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1883 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1884 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1885 if (pa_sink_flat_volume_enabled(s)) {
1886 pa_cvolume new_input_volume;
1888 /* Follow the root sink's real volume. */
1889 new_input_volume = *new_volume;
1890 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
1891 pa_sink_input_set_volume_direct(i, &new_input_volume);
1892 compute_reference_ratio(i);
/* Recurse so the whole filter-sink subtree tracks the new volume. */
1895 update_real_volume(i->origin_sink, new_volume, channel_map);
1900 /* Called from main thread. Only called for the root sink in shared volume
/* Determine the sink's real (hardware-facing) volume as the per-channel
 * maximum of all stream volumes, then refresh all inputs' real ratios
 * and soft volumes. With no inputs the current reference volume is kept. */
1902 static void compute_real_volume(pa_sink *s) {
1903 pa_sink_assert_ref(s);
1904 pa_assert_ctl_context();
1905 pa_assert(PA_SINK_IS_LINKED(s->state));
1906 pa_assert(pa_sink_flat_volume_enabled(s));
1907 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
1909 /* This determines the maximum volume of all streams and sets
1910 * s->real_volume accordingly. */
1912 if (!has_inputs(s)) {
1913 /* In the special case that we have no sink inputs we leave the
1914 * volume unmodified. */
1915 update_real_volume(s, &s->reference_volume, &s->channel_map);
/* Start from mute so the merge below yields the true maximum. */
1919 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1921 /* First let's determine the new maximum volume of all inputs
1922 * connected to this sink */
1923 get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
1924 update_real_volume(s, &s->real_volume, &s->channel_map);
1926 /* Then, let's update the real ratios/soft volumes of all inputs
1927 * connected to this sink */
1928 compute_real_ratios(s);
1931 /* Called from main thread. Only called for the root sink in shared volume
1932 * cases, except for internal recursive calls. */
/* After a sink-side reference-volume change, recompute every input's
 * volume as i->volume := s->reference_volume * i->reference_ratio.
 * Volume-sharing filter inputs are skipped (their volume is set later
 * from the root's real volume); their sinks are recursed into. */
1933 static void propagate_reference_volume(pa_sink *s) {
1937 pa_sink_assert_ref(s);
1938 pa_assert_ctl_context();
1939 pa_assert(PA_SINK_IS_LINKED(s->state));
1940 pa_assert(pa_sink_flat_volume_enabled(s));
1942 /* This is called whenever the sink volume changes that is not
1943 * caused by a sink input volume change. We need to fix up the
1944 * sink input volumes accordingly */
1946 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1947 pa_cvolume new_volume;
1949 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1950 propagate_reference_volume(i->origin_sink);
1952 /* Since the origin sink uses volume sharing, this input's volume
1953 * needs to be updated to match the root sink's real volume, but
1954 * that will be done later in update_shared_real_volume(). */
1958 /* This basically calculates:
1960 * i->volume := s->reference_volume * i->reference_ratio */
1962 new_volume = s->reference_volume;
1963 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
1964 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
1965 pa_sink_input_set_volume_direct(i, &new_volume);
1969 /* Called from main thread. Only called for the root sink in volume sharing
1970 * cases, except for internal recursive calls. The return value indicates
1971 * whether any reference volume actually changed. */
/* Set the sink's reference volume to *v (remapped from channel_map),
 * update the save flag, and push the same volume down into all
 * volume-sharing filter sinks. Returns whether the reference volume of
 * this sink (or, for filter trees, possibly a descendant) changed. */
1972 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
1974 bool reference_volume_changed;
1978 pa_sink_assert_ref(s);
1979 pa_assert(PA_SINK_IS_LINKED(s->state));
1981 pa_assert(channel_map);
1982 pa_assert(pa_cvolume_valid(v));
1985 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
1987 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
1988 pa_sink_set_reference_volume_direct(s, &volume);
/* Keep a previously-saved volume saved if nothing changed; otherwise
 * adopt the caller's save request. */
1990 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
1992 if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1993 /* If the root sink's volume doesn't change, then there can't be any
1994 * changes in the other sinks in the sink tree either.
1996 * It's probably theoretically possible that even if the root sink's
1997 * volume changes slightly, some filter sink doesn't change its volume
1998 * due to rounding errors. If that happens, we still want to propagate
1999 * the changed root sink volume to the sinks connected to the
2000 * intermediate sink that didn't change its volume. This theoretical
2001 * possibility is the reason why we have that !(s->flags &
2002 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2003 * notice even if we returned here false always if
2004 * reference_volume_changed is false. */
2007 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2008 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
/* Filter sinks never save their own volume (save=false). */
2009 update_reference_volume(i->origin_sink, v, channel_map, false);
2015 /* Called from main thread */
/* Set the sink's volume. With a non-NULL 'volume', the reference volume
 * is updated (always on the root sink for volume-sharing trees) and, in
 * flat volume mode, propagated to all streams. With volume == NULL
 * (flat volume only) the sink's reference/real volumes are re-derived
 * from the current stream volumes instead. Finally the soft or hardware
 * volume is applied and the IO thread is notified.
 * NOTE(review): elided view — parameter list tail (save flag etc.) and
 * several condition headers are not shown. */
2016 void pa_sink_set_volume(
2018 const pa_cvolume *volume,
2022 pa_cvolume new_reference_volume;
2025 pa_sink_assert_ref(s);
2026 pa_assert_ctl_context();
2027 pa_assert(PA_SINK_IS_LINKED(s->state));
2028 pa_assert(!volume || pa_cvolume_valid(volume));
2029 pa_assert(volume || pa_sink_flat_volume_enabled(s));
2030 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2032 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2033 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2034 if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2035 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
2039 /* In case of volume sharing, the volume is set for the root sink first,
2040 * from which it's then propagated to the sharing sinks. */
2041 root_sink = pa_sink_get_master(s);
2043 if (PA_UNLIKELY(!root_sink))
2046 /* As a special exception we accept mono volumes on all sinks --
2047 * even on those with more complex channel maps */
2050 if (pa_cvolume_compatible(volume, &s->sample_spec))
2051 new_reference_volume = *volume;
/* Mono volume: scale the current reference volume to the requested level. */
2053 new_reference_volume = s->reference_volume;
2054 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2057 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2059 if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2060 if (pa_sink_flat_volume_enabled(root_sink)) {
2061 /* OK, propagate this volume change back to the inputs */
2062 propagate_reference_volume(root_sink);
2064 /* And now recalculate the real volume */
2065 compute_real_volume(root_sink);
2067 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2071 /* If volume is NULL we synchronize the sink's real and
2072 * reference volumes with the stream volumes. */
2074 pa_assert(pa_sink_flat_volume_enabled(root_sink));
2076 /* Ok, let's determine the new real volume */
2077 compute_real_volume(root_sink);
2079 /* Let's 'push' the reference volume if necessary */
2080 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2081 /* If the sink and its root don't have the same number of channels, we need to remap */
2082 if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2083 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2084 update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2086 /* Now that the reference volume is updated, we can update the streams'
2087 * reference ratios. */
2088 compute_reference_ratios(root_sink);
2091 if (root_sink->set_volume) {
2092 /* If we have a function set_volume(), then we do not apply a
2093 * soft volume by default. However, set_volume() is free to
2094 * apply one to root_sink->soft_volume */
2096 pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
2097 if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2098 root_sink->set_volume(root_sink);
2101 /* If we have no function set_volume(), then the soft volume
2102 * becomes the real volume */
2103 root_sink->soft_volume = root_sink->real_volume;
2105 /* This tells the sink that soft volume and/or real volume changed */
2107 pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2110 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2111 * Only to be called by sink implementor */
/* Set the sink's software attenuation. A NULL 'volume' resets soft
 * volume to NORM (the elided condition header selects between the two
 * assignments). For non-deferred-volume sinks the new value is pushed
 * synchronously to the IO thread; deferred-volume sinks copy it into
 * thread_info directly (they are already in IO context here). */
2112 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2114 pa_sink_assert_ref(s);
2115 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2117 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2118 pa_sink_assert_io_context(s);
2120 pa_assert_ctl_context();
2123 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2125 s->soft_volume = *volume;
2127 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2128 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2130 s->thread_info.soft_volume = s->soft_volume;
2133 /* Called from the main thread. Only called for the root sink in volume sharing
2134 * cases, except for internal recursive calls. */
/* React to a hardware-initiated volume change: make the new real volume
 * the reference volume, and in flat volume mode rebuild all stream
 * volumes from their (now-equal) real ratios. Hardware changes are
 * treated as user actions, so the result is marked for saving. */
2135 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2139 pa_sink_assert_ref(s);
2140 pa_assert(old_real_volume);
2141 pa_assert_ctl_context();
2142 pa_assert(PA_SINK_IS_LINKED(s->state));
2144 /* This is called when the hardware's real volume changes due to
2145 * some external event. We copy the real volume into our
2146 * reference volume and then rebuild the stream volumes based on
2147 * i->real_ratio which should stay fixed. */
2149 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* Nothing to do if the hardware volume did not actually change. */
2150 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2153 /* 1. Make the real volume the reference volume */
2154 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2157 if (pa_sink_flat_volume_enabled(s)) {
2159 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2160 pa_cvolume new_volume;
2162 /* 2. Since the sink's reference and real volumes are equal
2163 * now our ratios should be too. */
2164 pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2166 /* 3. Recalculate the new stream reference volume based on the
2167 * reference ratio and the sink's reference volume.
2169 * This basically calculates:
2171 * i->volume = s->reference_volume * i->reference_ratio
2173 * This is identical to propagate_reference_volume() */
2174 new_volume = s->reference_volume;
2175 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2176 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2177 pa_sink_input_set_volume_direct(i, &new_volume);
2179 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2180 propagate_real_volume(i->origin_sink, old_real_volume);
2184 /* Something got changed in the hardware. It probably makes sense
2185 * to save changed hw settings given that hw volume changes not
2186 * triggered by PA are almost certainly done by the user. */
2187 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2188 s->save_volume = true;
2191 /* Called from io thread */
/* Ask the main thread (asynchronously, via the outbound message queue)
 * to refresh this sink's volume and mute state. */
2192 void pa_sink_update_volume_and_mute(pa_sink *s) {
2194 pa_sink_assert_io_context(s);
2196 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2199 /* Called from main thread */
/* Return the sink's reference volume. If refresh_volume is set on the
 * sink or force_refresh is requested, first re-read the hardware volume
 * (directly via get_volume() or via a synchronous IO-thread message for
 * deferred-volume sinks) and propagate any externally-made change. */
2200 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
2201 pa_sink_assert_ref(s);
2202 pa_assert_ctl_context();
2203 pa_assert(PA_SINK_IS_LINKED(s->state));
2205 if (s->refresh_volume || force_refresh) {
2206 struct pa_cvolume old_real_volume;
2208 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2210 old_real_volume = s->real_volume;
2212 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
/* Deferred-volume path: let the IO thread fetch the hw volume. */
2215 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2217 update_real_volume(s, &s->real_volume, &s->channel_map);
2218 propagate_real_volume(s, &old_real_volume);
2221 return &s->reference_volume;
2224 /* Called from main thread. In volume sharing cases, only the root sink may
/* Sink-implementor notification hook: record a hardware volume change
 * (new_real_volume) and propagate it to reference volume and streams. */
2226 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2227 pa_cvolume old_real_volume;
2229 pa_sink_assert_ref(s);
2230 pa_assert_ctl_context();
2231 pa_assert(PA_SINK_IS_LINKED(s->state));
2232 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2234 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2236 old_real_volume = s->real_volume;
2237 update_real_volume(s, new_real_volume, &s->channel_map);
2238 propagate_real_volume(s, &old_real_volume);
2241 /* Called from main thread */
/* Change the sink's mute state. No-op (beyond possibly latching the
 * save flag) when the state is unchanged. Applies hardware mute via
 * set_mute() when available (guarded so mute_changed callbacks during
 * the call are ignored), notifies the IO thread synchronously, and
 * fires subscription/hook events for linked sinks. */
2242 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2245 pa_sink_assert_ref(s);
2246 pa_assert_ctl_context();
2248 old_muted = s->muted;
2250 if (mute == old_muted) {
2251 s->save_muted |= save;
2256 s->save_muted = save;
2258 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
/* Flag prevents pa_sink_mute_changed() from re-entering while the
 * implementor callback runs. */
2259 s->set_mute_in_progress = true;
2261 s->set_mute_in_progress = false;
2264 if (!PA_SINK_IS_LINKED(s->state))
2267 pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
2268 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2269 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2270 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2273 /* Called from main thread */
/* Return the sink's mute state, optionally re-reading it from the
 * hardware first (via IO-thread message for deferred-volume sinks,
 * directly otherwise). The final 'return s->muted;' is elided in this
 * view. */
2274 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2276 pa_sink_assert_ref(s);
2277 pa_assert_ctl_context();
2278 pa_assert(PA_SINK_IS_LINKED(s->state));
2280 if ((s->refresh_muted || force_refresh) && s->get_mute) {
2283 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2284 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2285 pa_sink_mute_changed(s, mute);
2287 if (s->get_mute(s, &mute) >= 0)
2288 pa_sink_mute_changed(s, mute);
2295 /* Called from main thread */
/* Notify the core that the sink's mute state changed outside of
 * pa_sink_set_mute() (e.g. detected from hardware). Ignored while a
 * set_mute() call initiated by us is still in progress, and when the state
 * is already up to date; otherwise delegates to pa_sink_set_mute() with
 * save=true. */
2296 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2297 pa_sink_assert_ref(s);
2298 pa_assert_ctl_context();
2299 pa_assert(PA_SINK_IS_LINKED(s->state));
2301 if (s->set_mute_in_progress)
2304 /* pa_sink_set_mute() does this same check, so this may appear redundant,
2305 * but we must have this here also, because the save parameter of
2306 * pa_sink_set_mute() would otherwise have unintended side effects (saving
2307 * the mute state when it shouldn't be saved). */
2308 if (new_muted == s->muted)
2311 pa_sink_set_mute(s, new_muted, true);
2314 /* Called from main thread */
/* Merge property list 'p' into the sink's proplist using update mode 'mode'
 * (set/merge/replace semantics from pa_proplist_update()). If the sink is
 * linked, fires the proplist-changed hook and posts a change event so
 * clients see the update. Returns true (return elided in this excerpt). */
2315 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2316 pa_sink_assert_ref(s);
2317 pa_assert_ctl_context();
2320 pa_proplist_update(s->proplist, mode, p);
2322 if (PA_SINK_IS_LINKED(s->state)) {
2323 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2324 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2330 /* Called from main thread */
2331 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Set (or clear, when description is NULL) the human-readable device
 * description property. Exits early when nothing would change. Keeps the
 * monitor source's description in sync ("Monitor Source of <desc>"), and
 * notifies subscribers/hooks if the sink is linked.
 * NOTE(review): the 'const char *old' declaration and early-return lines
 * are elided in this excerpt. */
2332 void pa_sink_set_description(pa_sink *s, const char *description) {
2334 pa_sink_assert_ref(s);
2335 pa_assert_ctl_context();
2337 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2340 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2342 if (old && description && pa_streq(old, description))
2346 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2348 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2350 if (s->monitor_source) {
2353 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2354 pa_source_set_description(s->monitor_source, n);
2358 if (PA_SINK_IS_LINKED(s->state)) {
2359 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2360 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2364 /* Called from main thread */
/* Count everything linked to this sink: all sink inputs (including corked
 * ones) plus all streams connected to the monitor source. Compare with
 * pa_sink_used_by(), which excludes corked inputs and monitor streams. */
2365 unsigned pa_sink_linked_by(pa_sink *s) {
2368 pa_sink_assert_ref(s);
2369 pa_assert_ctl_context();
2370 pa_assert(PA_SINK_IS_LINKED(s->state));
2372 ret = pa_idxset_size(s->inputs);
2374 /* We add in the number of streams connected to us here. Please
2375 * note the asymmetry to pa_sink_used_by()! */
2377 if (s->monitor_source)
2378 ret += pa_source_linked_by(s->monitor_source);
2383 /* Called from main thread */
/* Count the sink inputs that are actively using the sink, i.e. total inputs
 * minus the corked ones. Monitor-source streams deliberately do not count
 * (asymmetric to pa_sink_linked_by()). */
2384 unsigned pa_sink_used_by(pa_sink *s) {
2387 pa_sink_assert_ref(s);
2388 pa_assert_ctl_context();
2389 pa_assert(PA_SINK_IS_LINKED(s->state));
2391 ret = pa_idxset_size(s->inputs);
2392 pa_assert(ret >= s->n_corked);
2394 /* Streams connected to our monitor source do not matter for
2395 * pa_sink_used_by()!.*/
2397 return ret - s->n_corked;
2400 /* Called from main thread */
/* Count the streams that inhibit auto-suspend of this sink, skipping
 * 'ignore_input' / 'ignore_output' (used when a stream is about to move
 * away). Unlinked, corked, and DONT_INHIBIT_AUTO_SUSPEND inputs do not
 * count; monitor-source outputs are added via pa_source_check_suspend().
 * Returns 0 when the sink is not linked. The local 'ret'/'idx' declarations
 * and the final return are elided in this excerpt. */
2401 unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
2406 pa_sink_assert_ref(s);
2407 pa_assert_ctl_context();
2409 if (!PA_SINK_IS_LINKED(s->state))
2414 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2415 pa_sink_input_state_t st;
2417 if (i == ignore_input)
2420 st = pa_sink_input_get_state(i);
2422 /* We do not assert here. It is perfectly valid for a sink input to
2423 * be in the INIT state (i.e. created, marked done but not yet put)
2424 * and we should not care if it's unlinked as it won't contribute
2425 * towards our busy status.
2426 */
2427 if (!PA_SINK_INPUT_IS_LINKED(st))
2430 if (st == PA_SINK_INPUT_CORKED)
2433 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2439 if (s->monitor_source)
2440 ret += pa_source_check_suspend(s->monitor_source, ignore_output)
2445 /* Called from the IO thread */
/* Copy each input's main-thread soft_volume into its thread_info shadow and
 * request a zero-length rewind so the new volume takes effect immediately on
 * already-rendered data. Inputs whose volume is unchanged are skipped
 * (the 'continue' after the equality check is elided in this excerpt). */
2446 static void sync_input_volumes_within_thread(pa_sink *s) {
2450 pa_sink_assert_ref(s);
2451 pa_sink_assert_io_context(s);
2453 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2454 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2457 i->thread_info.soft_volume = i->soft_volume;
2458 pa_sink_input_request_rewind(i, 0, true, false, false);
2462 /* Called from the IO thread. Only called for the root sink in volume sharing
2463 * cases, except for internal recursive calls. */
/* Apply the shared volume synchronously on this sink (via the
 * SET_VOLUME_SYNCED message handled in pa_sink_process_msg()), then recurse
 * into every filter sink that shares volume with this master, so the whole
 * volume-sharing tree is updated in one pass. */
2464 static void set_shared_volume_within_thread(pa_sink *s) {
2465 pa_sink_input *i = NULL;
2468 pa_sink_assert_ref(s);
2470 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2472 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2473 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2474 set_shared_volume_within_thread(i->origin_sink);
2478 /* Called from IO thread, except when it is not */
/* Central message handler for the sink's msgobject. Dispatches all
 * PA_SINK_MESSAGE_* codes: stream attach/detach, stream moves, volume and
 * mute synchronization between main and IO thread, state changes and
 * latency/rewind parameter queries. Most cases run in the IO thread; the
 * UPDATE_VOLUME_AND_MUTE case is explicitly handled in the main thread.
 * NOTE(review): interior lines (returns, braces, some locals) are elided in
 * this excerpt; the visible logic matches upstream sink.c. */
2479 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2480 pa_sink *s = PA_SINK(o);
2481 pa_sink_assert_ref(s);
2483 switch ((pa_sink_message_t) code) {
2485 case PA_SINK_MESSAGE_ADD_INPUT: {
2486 pa_sink_input *i = PA_SINK_INPUT(userdata);
2488 /* If you change anything here, make sure to change the
2489 * sink input handling a few lines down at
2490 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2492 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2494 /* Since the caller sleeps in pa_sink_input_put(), we can
2495 * safely access data outside of thread_info even though
2496 * it is mutable */
/* Mirror the main-thread sync_prev/sync_next links into thread_info so the
 * IO thread can walk synchronized stream groups without locking. */
2498 if ((i->thread_info.sync_prev = i->sync_prev)) {
2499 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2500 pa_assert(i->sync_prev->sync_next == i);
2501 i->thread_info.sync_prev->thread_info.sync_next = i;
2504 if ((i->thread_info.sync_next = i->sync_next)) {
2505 pa_assert(i->sink == i->thread_info.sync_next->sink);
2506 pa_assert(i->sync_next->sync_prev == i);
2507 i->thread_info.sync_next->thread_info.sync_prev = i;
2510 pa_sink_input_attach(i);
2512 pa_sink_input_set_state_within_thread(i, i->state);
2514 /* The requested latency of the sink input needs to be fixed up and
2515 * then configured on the sink. If this causes the sink latency to
2516 * go down, the sink implementor is responsible for doing a rewind
2517 * in the update_requested_latency() callback to ensure that the
2518 * sink buffer doesn't contain more data than what the new latency
2519 * allows.
2521 * XXX: Does it really make sense to push this responsibility to
2522 * the sink implementors? Wouldn't it be better to do it once in
2523 * the core than many times in the modules? */
2525 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2526 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2528 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2529 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2531 /* We don't rewind here automatically. This is left to the
2532 * sink input implementor because some sink inputs need a
2533 * slow start, i.e. need some time to buffer client
2534 * samples before beginning streaming.
2536 * XXX: Does it really make sense to push this functionality to
2537 * the sink implementors? Wouldn't it be better to do it once in
2538 * the core than many times in the modules? */
2540 /* In flat volume mode we need to update the volume as
2541 * well */
2542 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2545 case PA_SINK_MESSAGE_REMOVE_INPUT: {
2546 pa_sink_input *i = PA_SINK_INPUT(userdata);
2548 /* If you change anything here, make sure to change the
2549 * sink input handling a few lines down at
2550 * PA_SINK_MESSAGE_START_MOVE, too. */
2552 pa_sink_input_detach(i);
2554 pa_sink_input_set_state_within_thread(i, i->state);
2556 /* Since the caller sleeps in pa_sink_input_unlink(),
2557 * we can safely access data outside of thread_info even
2558 * though it is mutable */
2560 pa_assert(!i->sync_prev);
2561 pa_assert(!i->sync_next);
/* Unhook this input from any synchronized group it was part of in the IO
 * thread's view; the main-thread links are already cleared (asserts above). */
2563 if (i->thread_info.sync_prev) {
2564 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2565 i->thread_info.sync_prev = NULL;
2568 if (i->thread_info.sync_next) {
2569 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2570 i->thread_info.sync_next = NULL;
2573 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2574 pa_sink_invalidate_requested_latency(s, true);
2575 pa_sink_request_rewind(s, (size_t) -1);
2577 /* In flat volume mode we need to update the volume as
2578 * well */
2579 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2582 case PA_SINK_MESSAGE_START_MOVE: {
2583 pa_sink_input *i = PA_SINK_INPUT(userdata);
2585 /* We don't support moving synchronized streams. */
2586 pa_assert(!i->sync_prev);
2587 pa_assert(!i->sync_next);
2588 pa_assert(!i->thread_info.sync_next);
2589 pa_assert(!i->thread_info.sync_prev);
2591 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2593 size_t sink_nbytes, total_nbytes;
2595 /* The old sink probably has some audio from this
2596 * stream in its buffer. We want to "take it back" as
2597 * much as possible and play it to the new sink. We
2598 * don't know at this point how much the old sink can
2599 * rewind. We have to pick something, and that
2600 * something is the full latency of the old sink here.
2601 * So we rewind the stream buffer by the sink latency
2602 * amount, which may be more than what we should
2603 * rewind. This can result in a chunk of audio being
2604 * played both to the old sink and the new sink.
2606 * FIXME: Fix this code so that we don't have to make
2607 * guesses about how much the sink will actually be
2608 * able to rewind. If someone comes up with a solution
2609 * for this, something to note is that the part of the
2610 * latency that the old sink couldn't rewind should
2611 * ideally be compensated after the stream has moved
2612 * to the new sink by adding silence. The new sink
2613 * most likely can't start playing the moved stream
2614 * immediately, and that gap should be removed from
2615 * the "compensation silence" (at least at the time of
2616 * writing this, the move finish code will actually
2617 * already take care of dropping the new sink's
2618 * unrewindable latency, so taking into account the
2619 * unrewindable latency of the old sink is the only
2620 * problem).
2622 * The render_memblockq contents are discarded,
2623 * because when the sink changes, the format of the
2624 * audio stored in the render_memblockq may change
2625 * too, making the stored audio invalid. FIXME:
2626 * However, the read and write indices are moved back
2627 * the same amount, so if they are not the same now,
2628 * they won't be the same after the rewind either. If
2629 * the write index of the render_memblockq is ahead of
2630 * the read index, then the render_memblockq will feed
2631 * the new sink some silence first, which it shouldn't
2632 * do. The write index should be flushed to be the
2633 * same as the read index. */
2635 /* Get the latency of the sink */
2636 usec = pa_sink_get_latency_within_thread(s, false);
2637 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2638 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
2640 if (total_nbytes > 0) {
/* rewrite_nbytes is in the stream's (pre-resampler) domain, so convert the
 * sink-domain byte count back through the resampler when one is in use. */
2641 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
2642 i->thread_info.rewrite_flush = true;
2643 pa_sink_input_process_rewind(i, sink_nbytes);
2647 pa_sink_input_detach(i);
2649 /* Let's remove the sink input ...*/
2650 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2652 pa_sink_invalidate_requested_latency(s, true);
2654 pa_log_debug("Requesting rewind due to started move");
2655 pa_sink_request_rewind(s, (size_t) -1);
2657 /* In flat volume mode we need to update the volume as
2658 * well */
2659 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2662 case PA_SINK_MESSAGE_FINISH_MOVE: {
2663 pa_sink_input *i = PA_SINK_INPUT(userdata);
2665 /* We don't support moving synchronized streams. */
2666 pa_assert(!i->sync_prev);
2667 pa_assert(!i->sync_next);
2668 pa_assert(!i->thread_info.sync_next);
2669 pa_assert(!i->thread_info.sync_prev);
2671 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2673 pa_sink_input_attach(i);
2675 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2679 /* In the ideal case the new sink would start playing
2680 * the stream immediately. That requires the sink to
2681 * be able to rewind all of its latency, which usually
2682 * isn't possible, so there will probably be some gap
2683 * before the moved stream becomes audible. We then
2684 * have two possibilities: 1) start playing the stream
2685 * from where it is now, or 2) drop the unrewindable
2686 * latency of the sink from the stream. With option 1
2687 * we won't lose any audio but the stream will have a
2688 * pause. With option 2 we may lose some audio but the
2689 * stream time will be somewhat in sync with the wall
2690 * clock. Lennart seems to have chosen option 2 (one
2691 * of the reasons might have been that option 1 is
2692 * actually much harder to implement), so we drop the
2693 * latency of the new sink from the moved stream and
2694 * hope that the sink will undo most of that in the
2695 * rewind. */
2697 /* Get the latency of the sink */
2698 usec = pa_sink_get_latency_within_thread(s, false);
2699 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2702 pa_sink_input_drop(i, nbytes);
2704 pa_log_debug("Requesting rewind due to finished move");
2705 pa_sink_request_rewind(s, nbytes);
2708 /* Updating the requested sink latency has to be done
2709 * after the sink rewind request, not before, because
2710 * otherwise the sink may limit the rewind amount
2711 * needlessly. */
2713 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2714 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2716 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2717 pa_sink_input_update_max_request(i, s->thread_info.max_request);
/* In flat volume mode we need to update the volume as well. */
2719 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2722 case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
2723 pa_sink *root_sink = pa_sink_get_master(s);
2725 if (PA_LIKELY(root_sink))
2726 set_shared_volume_within_thread(root_sink);
2731 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2733 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
/* Queue the HW volume write so it is applied at the right playback time. */
2735 pa_sink_volume_change_push(s);
2737 /* Fall through ... */
2739 case PA_SINK_MESSAGE_SET_VOLUME:
2741 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2742 s->thread_info.soft_volume = s->soft_volume;
2743 pa_sink_request_rewind(s, (size_t) -1);
2746 /* Fall through ... */
2748 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2749 sync_input_volumes_within_thread(s);
2752 case PA_SINK_MESSAGE_GET_VOLUME:
2754 if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
2756 pa_sink_volume_change_flush(s);
2757 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2760 /* In case sink implementor reset SW volume. */
2761 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2762 s->thread_info.soft_volume = s->soft_volume;
2763 pa_sink_request_rewind(s, (size_t) -1);
2768 case PA_SINK_MESSAGE_SET_MUTE:
2770 if (s->thread_info.soft_muted != s->muted) {
2771 s->thread_info.soft_muted = s->muted;
2772 pa_sink_request_rewind(s, (size_t) -1);
2775 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2780 case PA_SINK_MESSAGE_GET_MUTE:
2782 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2783 return s->get_mute(s, userdata);
2787 case PA_SINK_MESSAGE_SET_STATE: {
/* True when transitioning into or out of SUSPENDED — only then do the
 * inputs' suspend_within_thread() callbacks need to run. */
2789 bool suspend_change =
2790 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2791 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2793 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2795 if (s->thread_info.state == PA_SINK_SUSPENDED) {
/* A suspended sink cannot rewind; drop any pending rewind request. */
2796 s->thread_info.rewind_nbytes = 0;
2797 s->thread_info.rewind_requested = false;
2800 if (suspend_change) {
2804 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2805 if (i->suspend_within_thread)
2806 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2812 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2814 pa_usec_t *usec = userdata;
2815 *usec = pa_sink_get_requested_latency_within_thread(s);
2817 /* Yes, that's right, the IO thread will see -1 when no
2818 * explicit requested latency is configured, the main
2819 * thread will see max_latency */
2820 if (*usec == (pa_usec_t) -1)
2821 *usec = s->thread_info.max_latency;
2826 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2827 pa_usec_t *r = userdata;
2829 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2834 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2835 pa_usec_t *r = userdata;
2837 r[0] = s->thread_info.min_latency;
2838 r[1] = s->thread_info.max_latency;
2843 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2845 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2848 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2850 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2853 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2855 *((size_t*) userdata) = s->thread_info.max_rewind;
2858 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2860 *((size_t*) userdata) = s->thread_info.max_request;
2863 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2865 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2868 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2870 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2873 case PA_SINK_MESSAGE_SET_PORT:
2875 pa_assert(userdata);
2877 struct sink_message_set_port *msg_data = userdata;
2878 msg_data->ret = s->set_port(s, msg_data->port);
2882 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2883 /* This message is sent from IO-thread and handled in main thread. */
2884 pa_assert_ctl_context();
2886 /* Make sure we're not messing with main thread when no longer linked */
2887 if (!PA_SINK_IS_LINKED(s->state))
2890 pa_sink_get_volume(s, true);
2891 pa_sink_get_mute(s, true);
2894 case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
2895 s->thread_info.port_latency_offset = offset;
/* GET_LATENCY and MAX are handled by derived implementations / invalid. */
2898 case PA_SINK_MESSAGE_GET_LATENCY:
2899 case PA_SINK_MESSAGE_MAX:
2906 /* Called from main thread */
/* Suspend or resume every sink of the core for the given (non-zero) suspend
 * cause. Accumulates the first error from pa_sink_suspend() into the return
 * value (locals/return elided in this excerpt) but keeps iterating so all
 * sinks are attempted. */
2907 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
2912 pa_core_assert_ref(c);
2913 pa_assert_ctl_context();
2914 pa_assert(cause != 0);
2916 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2919 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2926 /* Called from IO thread */
/* Detach all sink inputs, and the monitor source's outputs, from the IO
 * thread driver — used around driver reconfiguration/teardown. */
2927 void pa_sink_detach_within_thread(pa_sink *s) {
2931 pa_sink_assert_ref(s);
2932 pa_sink_assert_io_context(s);
2933 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2935 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2936 pa_sink_input_detach(i);
2938 if (s->monitor_source)
2939 pa_source_detach_within_thread(s->monitor_source);
2942 /* Called from IO thread */
/* Counterpart of pa_sink_detach_within_thread(): re-attach all sink inputs
 * and the monitor source after the driver is ready again. */
2943 void pa_sink_attach_within_thread(pa_sink *s) {
2947 pa_sink_assert_ref(s);
2948 pa_sink_assert_io_context(s);
2949 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2951 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2952 pa_sink_input_attach(i);
2954 if (s->monitor_source)
2955 pa_source_attach_within_thread(s->monitor_source);
2958 /* Called from IO thread */
/* Ask the sink to rewind its playback buffer by up to 'nbytes' bytes
 * ((size_t) -1 means "as much as possible"). The amount is clamped to
 * max_rewind; if a larger-or-equal rewind is already pending the call is a
 * no-op (the early return after the pending-request check is elided in this
 * excerpt). Finally the implementor's request_rewind() callback is invoked
 * if provided. */
2959 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2960 pa_sink_assert_ref(s);
2961 pa_sink_assert_io_context(s);
2962 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2964 if (nbytes == (size_t) -1)
2965 nbytes = s->thread_info.max_rewind;
2967 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2969 if (s->thread_info.rewind_requested &&
2970 nbytes <= s->thread_info.rewind_nbytes)
2973 s->thread_info.rewind_nbytes = nbytes;
2974 s->thread_info.rewind_requested = true;
2976 if (s->request_rewind)
2977 s->request_rewind(s);
2980 /* Called from IO thread */
/* Compute the effective requested latency: for fixed-latency sinks, the
 * clamped fixed latency; otherwise the minimum over all inputs' requested
 * latencies and the monitor source's requested latency, clamped to
 * [min_latency, max_latency]. Returns (pa_usec_t) -1 when nothing requested
 * a latency. The result is cached (requested_latency_valid) only once the
 * sink is linked; the cache is checked first for fast repeat queries. */
2981 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2982 pa_usec_t result = (pa_usec_t) -1;
2985 pa_usec_t monitor_latency;
2987 pa_sink_assert_ref(s);
2988 pa_sink_assert_io_context(s);
2990 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2991 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2993 if (s->thread_info.requested_latency_valid)
2994 return s->thread_info.requested_latency;
2996 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2997 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2998 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2999 result = i->thread_info.requested_sink_latency;
3001 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3003 if (monitor_latency != (pa_usec_t) -1 &&
3004 (result == (pa_usec_t) -1 || result > monitor_latency))
3005 result = monitor_latency;
3007 if (result != (pa_usec_t) -1)
3008 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3010 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3011 /* Only cache if properly initialized */
3012 s->thread_info.requested_latency = result;
3013 s->thread_info.requested_latency_valid = true;
3019 /* Called from main thread */
/* Main-thread accessor for the requested latency: queries the IO thread via
 * GET_REQUESTED_LATENCY (which maps "none requested" to max_latency).
 * For suspended sinks the message round-trip is skipped (the early-return
 * branch body and the 'usec' declaration are elided in this excerpt). */
3020 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3023 pa_sink_assert_ref(s);
3024 pa_assert_ctl_context();
3025 pa_assert(PA_SINK_IS_LINKED(s->state));
3027 if (s->state == PA_SINK_SUSPENDED)
3030 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3035 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Set the maximum rewindable amount and propagate the new value to every
 * attached input (only when linked) and to the monitor source. No-op when
 * the value is unchanged. */
3036 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3040 pa_sink_assert_ref(s);
3041 pa_sink_assert_io_context(s);
3043 if (max_rewind == s->thread_info.max_rewind)
3046 s->thread_info.max_rewind = max_rewind;
3048 if (PA_SINK_IS_LINKED(s->thread_info.state))
3049 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3050 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3052 if (s->monitor_source)
3053 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3056 /* Called from main thread */
/* Main-thread entry point for changing max_rewind: routed through the IO
 * thread when the sink is linked, applied directly otherwise (the 'else'
 * that guards the direct call is elided in this excerpt). */
3057 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3058 pa_sink_assert_ref(s);
3059 pa_assert_ctl_context();
3061 if (PA_SINK_IS_LINKED(s->state))
3062 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3064 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3067 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Set the maximum amount of data the sink may request per render call and
 * propagate it to all attached inputs when linked. No-op when unchanged.
 * Unlike max_rewind, this is not forwarded to the monitor source. */
3068 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3071 pa_sink_assert_ref(s);
3072 pa_sink_assert_io_context(s);
3074 if (max_request == s->thread_info.max_request)
3077 s->thread_info.max_request = max_request;
3079 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3082 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3083 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3087 /* Called from main thread */
/* Main-thread entry point for changing max_request: routed through the IO
 * thread when the sink is linked, applied directly otherwise (the 'else'
 * that guards the direct call is elided in this excerpt). */
3088 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3089 pa_sink_assert_ref(s);
3090 pa_assert_ctl_context();
3092 if (PA_SINK_IS_LINKED(s->state))
3093 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3095 pa_sink_set_max_request_within_thread(s, max_request);
3098 /* Called from IO thread */
/* Drop the cached requested-latency value (only meaningful for
 * dynamic-latency sinks) and notify the sink implementor plus every input
 * that the requested latency may have changed, so they can recompute.
 * NOTE(review): the 'dynamic' parameter's early-return handling appears to
 * be among the elided lines — verify against the full file. */
3099 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
3103 pa_sink_assert_ref(s);
3104 pa_sink_assert_io_context(s);
3106 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3107 s->thread_info.requested_latency_valid = false;
3111 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3113 if (s->update_requested_latency)
3114 s->update_requested_latency(s);
3116 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3117 if (i->update_sink_requested_latency)
3118 i->update_sink_requested_latency(i);
3122 /* Called from main thread */
/* Set the dynamic latency range. Zero min/max mean "no limit" and are
 * normalized to the compiled-in ABSOLUTE_{MIN,MAX}_LATENCY bounds. A
 * non-default range on a sink without PA_SINK_DYNAMIC_LATENCY trips the
 * assert. Routed through the IO thread (via a pa_usec_t[2] message payload,
 * its declaration elided here) when linked, applied directly otherwise. */
3123 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3124 pa_sink_assert_ref(s);
3125 pa_assert_ctl_context();
3127 /* min_latency == 0: no limit
3128 * min_latency anything else: specified limit
3130 * Similar for max_latency */
3132 if (min_latency < ABSOLUTE_MIN_LATENCY)
3133 min_latency = ABSOLUTE_MIN_LATENCY;
3135 if (max_latency <= 0 ||
3136 max_latency > ABSOLUTE_MAX_LATENCY)
3137 max_latency = ABSOLUTE_MAX_LATENCY;
3139 pa_assert(min_latency <= max_latency);
3141 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3142 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3143 max_latency == ABSOLUTE_MAX_LATENCY) ||
3144 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3146 if (PA_SINK_IS_LINKED(s->state)) {
3152 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3154 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3157 /* Called from main thread */
/* Read back the latency range into *min_latency/*max_latency: via a
 * GET_LATENCY_RANGE message when linked (so the IO thread's values are
 * authoritative), straight from thread_info otherwise (the 'else' before
 * the direct reads is elided in this excerpt). */
3158 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3159 pa_sink_assert_ref(s);
3160 pa_assert_ctl_context();
3161 pa_assert(min_latency);
3162 pa_assert(max_latency);
3164 if (PA_SINK_IS_LINKED(s->state)) {
3165 pa_usec_t r[2] = { 0, 0 };
3167 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3169 *min_latency = r[0];
3170 *max_latency = r[1];
3172 *min_latency = s->thread_info.min_latency;
3173 *max_latency = s->thread_info.max_latency;
3177 /* Called from IO thread */
/* Apply a (pre-validated) latency range in the IO thread. No-op when
 * unchanged; otherwise stores the new bounds, notifies every input's
 * update_sink_latency_range() callback when linked, invalidates the cached
 * requested latency, and mirrors the range onto the monitor source. */
3178 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3179 pa_sink_assert_ref(s);
3180 pa_sink_assert_io_context(s);
3182 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3183 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3184 pa_assert(min_latency <= max_latency);
3186 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3187 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3188 max_latency == ABSOLUTE_MAX_LATENCY) ||
3189 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3191 if (s->thread_info.min_latency == min_latency &&
3192 s->thread_info.max_latency == max_latency)
3195 s->thread_info.min_latency = min_latency;
3196 s->thread_info.max_latency = max_latency;
3198 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3202 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3203 if (i->update_sink_latency_range)
3204 i->update_sink_latency_range(i);
3207 pa_sink_invalidate_requested_latency(s, false);
3209 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3212 /* Called from main thread */
/* Set the fixed latency of a non-dynamic-latency sink. Dynamic-latency
 * sinks must pass 0 (asserted) and return early. The value is clamped to
 * the absolute bounds, applied via a SET_FIXED_LATENCY message when linked
 * or written directly otherwise (the 'else' before the direct write is
 * elided in this excerpt), and mirrored onto the monitor source. */
3213 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3214 pa_sink_assert_ref(s);
3215 pa_assert_ctl_context();
3217 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3218 pa_assert(latency == 0);
3222 if (latency < ABSOLUTE_MIN_LATENCY)
3223 latency = ABSOLUTE_MIN_LATENCY;
3225 if (latency > ABSOLUTE_MAX_LATENCY)
3226 latency = ABSOLUTE_MAX_LATENCY;
3228 if (PA_SINK_IS_LINKED(s->state))
3229 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3231 s->thread_info.fixed_latency = latency;
3233 pa_source_set_fixed_latency(s->monitor_source, latency);
3236 /* Called from main thread */
/* Return the fixed latency: 0 for dynamic-latency sinks; otherwise the IO
 * thread's value via GET_FIXED_LATENCY when linked, or the thread_info copy
 * directly. The 'latency' local, early return and final return are elided
 * in this excerpt. */
3237 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3240 pa_sink_assert_ref(s);
3241 pa_assert_ctl_context();
3243 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3246 if (PA_SINK_IS_LINKED(s->state))
3247 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3249 latency = s->thread_info.fixed_latency;
3254 /* Called from IO thread */
/* Apply the fixed latency in the IO thread. Dynamic-latency sinks reset it
 * to 0 (and on the monitor source) and return early. Otherwise: no-op when
 * unchanged; else store, notify inputs' update_sink_fixed_latency()
 * callbacks when linked, invalidate the cached requested latency, and
 * mirror onto the monitor source. */
3255 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3256 pa_sink_assert_ref(s);
3257 pa_sink_assert_io_context(s);
3259 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3260 pa_assert(latency == 0);
3261 s->thread_info.fixed_latency = 0;
3263 if (s->monitor_source)
3264 pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
3269 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3270 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3272 if (s->thread_info.fixed_latency == latency)
3275 s->thread_info.fixed_latency = latency;
3277 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3281 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3282 if (i->update_sink_fixed_latency)
3283 i->update_sink_fixed_latency(i);
3286 pa_sink_invalidate_requested_latency(s, false);
3288 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3291 /* Called from main context */
/* Set the active port's latency offset (added to reported sink latency).
 * Forwarded to the IO thread via SET_PORT_LATENCY_OFFSET when linked,
 * written to thread_info directly otherwise (the 'else' guarding the
 * direct write is elided in this excerpt); then the corresponding hook is
 * fired. */
3292 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3293 pa_sink_assert_ref(s);
3295 s->port_latency_offset = offset;
3297 if (PA_SINK_IS_LINKED(s->state))
3298 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3300 s->thread_info.port_latency_offset = offset;
3302 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3305 /* Called from main context */
/* Return the sink's max_rewind: directly from thread_info when not yet
 * linked (IO thread not authoritative), otherwise via a GET_MAX_REWIND
 * message round-trip (the 'size_t r' declaration and final 'return r;'
 * are elided in this excerpt). */
3306 size_t pa_sink_get_max_rewind(pa_sink *s) {
3308 pa_assert_ctl_context();
3309 pa_sink_assert_ref(s);
3311 if (!PA_SINK_IS_LINKED(s->state))
3312 return s->thread_info.max_rewind;
3314 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3319 /* Called from main context */
/* Return the sink's max_request, symmetric to pa_sink_get_max_rewind():
 * direct read when unlinked, GET_MAX_REQUEST message otherwise (the local
 * 'r' and final return are elided in this excerpt). */
3320 size_t pa_sink_get_max_request(pa_sink *s) {
3322 pa_sink_assert_ref(s);
3323 pa_assert_ctl_context();
3325 if (!PA_SINK_IS_LINKED(s->state))
3326 return s->thread_info.max_request;
3328 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3333 /* Called from main context */
/* Switch the sink's active port by name. Error paths: NOTIMPLEMENTED when
 * the sink has no set_port() callback, NOENTITY for an unknown port name or
 * when the implementor's set_port() fails. Selecting the already-active
 * port only upgrades save_port. For deferred-volume sinks the set_port()
 * call is marshalled into the IO thread via a sink_message_set_port
 * message; otherwise it is called directly. On success the active port,
 * its latency offset, the default-sink choice, subscribers and the
 * PORT_CHANGED hook are all updated. (Guard conditions for the
 * NOTIMPLEMENTED/NOENTITY returns and 'ret' handling are partially elided
 * in this excerpt.) */
3334 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3335 pa_device_port *port;
3338 pa_sink_assert_ref(s);
3339 pa_assert_ctl_context();
3342 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3343 return -PA_ERR_NOTIMPLEMENTED;
3347 return -PA_ERR_NOENTITY;
3349 if (!(port = pa_hashmap_get(s->ports, name)))
3350 return -PA_ERR_NOENTITY;
3352 if (s->active_port == port) {
3353 s->save_port = s->save_port || save;
3357 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
3358 struct sink_message_set_port msg = { .port = port, .ret = 0 };
3359 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
3363 ret = s->set_port(s, port);
3366 return -PA_ERR_NOENTITY;
3368 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3370 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3372 s->active_port = port;
3373 s->save_port = save;
3375 pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3377 /* The active port affects the default sink selection. */
3378 pa_core_update_default_sink(s->core);
3380 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
/* Derive a device icon name for proplist 'p' when none is set yet.
 * Heuristic cascade: form factor first, then device class, then a fallback
 * based on is_sink; a profile-derived suffix ('s') and the bus name are
 * appended to form e.g. "audio-card-analog-usb".
 * NOTE(review): several branch bodies (t assignments for "webcam",
 * "computer", "handset", the modem/class handling, the profile suffixes and
 * the final return) are elided in this excerpt — verify against the full
 * file before relying on the exact mapping. */
3385 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3386 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3390 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3393 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3395 if (pa_streq(ff, "microphone"))
3396 t = "audio-input-microphone";
3397 else if (pa_streq(ff, "webcam"))
3399 else if (pa_streq(ff, "computer"))
3401 else if (pa_streq(ff, "handset"))
3403 else if (pa_streq(ff, "portable"))
3404 t = "multimedia-player";
3405 else if (pa_streq(ff, "tv"))
3406 t = "video-display";
3409 * The following icons are not part of the icon naming spec,
3410 * because Rodney Dawes sucks as the maintainer of that spec.
3412 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3413 */
3414 else if (pa_streq(ff, "headset"))
3415 t = "audio-headset";
3416 else if (pa_streq(ff, "headphone"))
3417 t = "audio-headphones";
3418 else if (pa_streq(ff, "speaker"))
3419 t = "audio-speakers";
3420 else if (pa_streq(ff, "hands-free"))
3421 t = "audio-handsfree";
3425 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3426 if (pa_streq(c, "modem"))
3433 t = "audio-input-microphone";
3436 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3437 if (strstr(profile, "analog"))
3439 else if (strstr(profile, "iec958"))
3441 else if (strstr(profile, "hdmi"))
3445 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3447 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
/* Fills in PA_PROP_DEVICE_DESCRIPTION for a device, preferring (in order)
 * the owning card's description, a form-factor based label, a class based
 * label, and finally the raw product name.  If a profile description is
 * available it is appended.  Returns without change when a description is
 * already present.
 * NOTE(review): the guard conditions around the card check, the
 * "no description found" bail-out and the returns are elided from this
 * excerpt — confirm against the full file. */
3452 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3453 const char *s, *d = NULL, *k;
3456 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
/* Prefer the description of the card this device belongs to. */
3460 if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3464 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3465 if (pa_streq(s, "internal"))
3466 d = _("Built-in Audio");
3469 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3470 if (pa_streq(s, "modem"))
/* Last resort: the hardware product name as reported by the backend. */
3474 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
/* Append the profile description ("Analog Stereo", ...) when present. */
3479 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3482 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3484 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
/* Sets PA_PROP_DEVICE_INTENDED_ROLES to "phone" for telephony-style form
 * factors (handset/hands-free/headset).  Leaves an existing value alone.
 * NOTE(review): the returns are elided from this excerpt — presumably
 * true when a role was set, false otherwise; confirm against the full
 * file. */
3489 bool pa_device_init_intended_roles(pa_proplist *p) {
3493 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3496 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3497 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3498 || pa_streq(s, "headset")) {
3499 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
/* Computes a heuristic priority for a device from its proplist: device
 * class, form factor, bus and profile name each contribute to the score.
 * Used to rank devices (e.g. for default-device selection).
 * NOTE(review): the actual priority increments for each branch are
 * elided from this excerpt — confirm the weights against the full
 * file. */
3506 unsigned pa_device_init_priority(pa_proplist *p) {
3508 unsigned priority = 0;
/* Class: plain sound devices rank above modems/other classes. */
3512 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3514 if (pa_streq(s, "sound"))
3516 else if (!pa_streq(s, "modem"))
/* Form factor: headphones/hifi/speakers/portable/internal. */
3520 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3522 if (pa_streq(s, "headphone"))
3524 else if (pa_streq(s, "hifi"))
3526 else if (pa_streq(s, "speaker"))
3528 else if (pa_streq(s, "portable"))
3530 else if (pa_streq(s, "internal"))
/* Bus: bluetooth vs. usb vs. pci. */
3534 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3536 if (pa_streq(s, "bluetooth"))
3538 else if (pa_streq(s, "usb"))
3540 else if (pa_streq(s, "pci"))
/* Profile: analog outranks iec958 (S/PDIF). */
3544 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3546 if (pa_startswith(s, "analog-"))
3548 else if (pa_startswith(s, "iec958-"))
/* Lock-free free-list recycling pa_sink_volume_change entries, so the IO
 * thread can allocate/free them without hitting the heap on every change. */
3555 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3557 /* Called from the IO thread. */
/* Allocates (or recycles from the flist) a volume-change entry and
 * initialises its llist pointers and hw_volume for the sink's channel
 * count.  NOTE(review): the `return c;` is elided from this excerpt. */
3558 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3559 pa_sink_volume_change *c;
3560 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3561 c = pa_xnew(pa_sink_volume_change, 1);
3563 PA_LLIST_INIT(pa_sink_volume_change, c);
3565 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3569 /* Called from the IO thread. */
/* Returns a volume-change entry to the static flist; when the flist is
 * full it is presumably freed with pa_xfree (the else branch is elided
 * from this excerpt). */
3570 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3572 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3576 /* Called from the IO thread. */
/* Queues a deferred hardware-volume change so that it takes effect when
 * the audio currently in flight (sink latency) has been played.  The
 * queue is kept sorted by target time; a safety margin shifts upward
 * changes slightly later and downward changes slightly earlier so that
 * audible glitches fall on the quieter side.  Entries scheduled after
 * the new change become obsolete and are dropped. */
3577 void pa_sink_volume_change_push(pa_sink *s) {
3578 pa_sink_volume_change *c = NULL;
3579 pa_sink_volume_change *nc = NULL;
3580 pa_sink_volume_change *pc = NULL;
3581 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3583 const char *direction = NULL;
3586 nc = pa_sink_volume_change_new(s);
3588 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3589 * Adding one more volume for HW would get us rid of this, but I am trying
3590 * to survive with the ones we already have. */
/* The HW target is the real volume with the software part divided out. */
3591 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
/* Nothing queued and HW volume already at target: drop the change. */
3593 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3594 pa_log_debug("Volume not changing");
3595 pa_sink_volume_change_free(nc);
/* Schedule at now + sink latency + configured extra delay. */
3599 nc->at = pa_sink_get_latency_within_thread(s, false);
3600 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
/* Walk backwards from the tail to find the insertion point, applying
 * the safety margin relative to each neighbour. */
3602 if (s->thread_info.volume_changes_tail) {
3603 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3604 /* If volume is going up let's do it a bit late. If it is going
3605 * down let's do it a bit early. */
3606 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3607 if (nc->at + safety_margin > c->at) {
3608 nc->at += safety_margin;
3613 else if (nc->at - safety_margin > c->at) {
3614 nc->at -= safety_margin;
/* Queue was empty: margin is taken relative to the current HW volume. */
3622 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3623 nc->at += safety_margin;
3626 nc->at -= safety_margin;
3629 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3632 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3635 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3637 /* We can ignore volume events that came earlier but should happen later than this. */
3638 PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
3639 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3640 pa_sink_volume_change_free(c);
/* The new change is now the last one in the queue. */
3643 s->thread_info.volume_changes_tail = nc;
3646 /* Called from the IO thread. */
/* Discards all pending volume changes without applying them: detaches
 * the list from thread_info, then walks it freeing every entry. */
3647 static void pa_sink_volume_change_flush(pa_sink *s) {
3648 pa_sink_volume_change *c = s->thread_info.volume_changes;
3650 s->thread_info.volume_changes = NULL;
3651 s->thread_info.volume_changes_tail = NULL;
/* Loop over the detached list (loop header elided from this excerpt). */
3653 pa_sink_volume_change *next = c->next;
3654 pa_sink_volume_change_free(c);
3659 /* Called from the IO thread. */
/* Applies every queued volume change whose target time has been reached
 * by writing it to the hardware (via s->write_volume, presumably called
 * in an elided line) and updating current_hw_volume.  When changes
 * remain, the delay until the next one is written to *usec_to_next (if
 * non-NULL).  Returns whether anything was applied, per the visible
 * bookkeeping — the returns themselves are elided from this excerpt. */
3660 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
/* Nothing to do if no changes are queued or the sink is not linked. */
3666 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
/* Deferred volume requires a backend write_volume() implementation. */
3672 pa_assert(s->write_volume);
3674 now = pa_rtclock_now();
/* Pop and apply every change that is due. */
3676 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3677 pa_sink_volume_change *c = s->thread_info.volume_changes;
3678 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3679 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3680 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3682 s->thread_info.current_hw_volume = c->hw_volume;
3683 pa_sink_volume_change_free(c);
/* Report the time until the next pending change, if requested. */
3689 if (s->thread_info.volume_changes) {
3691 *usec_to_next = s->thread_info.volume_changes->at - now;
3692 if (pa_log_ratelimit(PA_LOG_DEBUG))
3693 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
/* Queue drained: reset the tail pointer too. */
3698 s->thread_info.volume_changes_tail = NULL;
3703 /* Called from the IO thread. */
/* After a stream rewind of `nbytes`, queued volume changes that were
 * scheduled beyond the (now shorter) playback horizon are pulled earlier
 * so they still line up with the audio, then any change that became due
 * is applied immediately.
 * NOTE(review): `rewound` is computed but its use is elided from this
 * excerpt; presumably the per-entry `c->at` is first shifted back by it
 * — confirm against the full file. */
3704 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3705 /* All the queued volume events later than current latency are shifted to happen earlier. */
3706 pa_sink_volume_change *c;
3707 pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3708 pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3709 pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
3711 pa_log_debug("latency = %lld", (long long) limit);
3712 limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3714 PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
/* Re-apply the safety margin relative to the previous volume: earlier
 * for downward changes, later for upward ones. */
3715 pa_usec_t modified_limit = limit;
3716 if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3717 modified_limit -= s->thread_info.volume_change_safety_margin;
3719 modified_limit += s->thread_info.volume_change_safety_margin;
3720 if (c->at > modified_limit) {
/* Clamp the entry's target time to the (possibly shifted) limit. */
3722 if (c->at < modified_limit)
3723 c->at = modified_limit;
3725 prev_vol = pa_cvolume_avg(&c->hw_volume);
/* Apply anything that became due as a result of the shift. */
3727 pa_sink_volume_change_apply(s, NULL);
3730 /* Called from the main thread */
3731 /* Gets the list of formats supported by the sink. The members and idxset must
3732 * be freed by the caller. */
/* If the backend implements get_formats() that result is used verbatim;
 * otherwise a single PCM pa_format_info is synthesized as the default.
 * NOTE(review): the final `return ret;` is elided from this excerpt. */
3733 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3738 if (s->get_formats) {
3739 /* Sink supports format query, all is good */
3740 ret = s->get_formats(s);
3742 /* Sink doesn't support format query, so assume it does PCM */
3743 pa_format_info *f = pa_format_info_new();
3744 f->encoding = PA_ENCODING_PCM;
3746 ret = pa_idxset_new(NULL, NULL);
3747 pa_idxset_put(ret, f, NULL);
3753 /* Called from the main thread */
3754 /* Allows an external source to set what formats a sink supports if the sink
3755 * permits this. The function makes a copy of the formats on success. */
/* Delegates to the backend's set_formats() hook; when the hook is absent
 * the call fails (the `if (s->set_formats)` guard and failure return are
 * elided from this excerpt). */
3756 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3761 /* Sink supports setting formats -- let's give it a shot */
3762 return s->set_formats(s, formats);
3764 /* Sink doesn't support setting this -- bail out */
3768 /* Called from the main thread */
3769 /* Checks if the sink can accept this format */
/* Fetches the sink's supported formats and scans them for one compatible
 * with `f`; the format list is freed before returning.
 * NOTE(review): the result flag assignment inside the match branch and
 * the final return are elided from this excerpt. */
3770 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3771 pa_idxset *formats = NULL;
3777 formats = pa_sink_get_formats(s);
3780 pa_format_info *finfo_device;
3783 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3784 if (pa_format_info_is_compatible(finfo_device, f)) {
/* Free the list and its members (caller-owned per pa_sink_get_formats). */
3790 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3796 /* Called from the main thread */
3797 /* Calculates the intersection between formats supported by the sink and
3798 * in_formats, and returns these, in the order of the sink's formats. */
/* Builds out_formats by iterating the sink's formats (outer loop) and
 * copying every in_formats entry compatible with each; copies are owned
 * by the returned idxset.  NOTE(review): the `return out_formats;` is
 * elided from this excerpt. */
3799 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3800 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3801 pa_format_info *f_sink, *f_in;
/* Empty/NULL input: skip the intersection (presumably jumps to cleanup). */
3806 if (!in_formats || pa_idxset_isempty(in_formats))
3809 sink_formats = pa_sink_get_formats(s);
3811 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3812 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3813 if (pa_format_info_is_compatible(f_sink, f_in))
3814 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
/* Free the sink's format list; out_formats is returned to the caller. */
3820 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3825 /* Called from the main thread. */
3826 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
3827 pa_cvolume old_volume;
3828 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3829 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3834 old_volume = s->reference_volume;
3836 if (pa_cvolume_equal(volume, &old_volume))
3839 s->reference_volume = *volume;
3840 pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
3841 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
3842 s->flags & PA_SINK_DECIBEL_VOLUME),
3843 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
3844 s->flags & PA_SINK_DECIBEL_VOLUME));
3846 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3847 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);