2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
31 #include <pulse/introspect.h>
32 #include <pulse/format.h>
33 #include <pulse/utf8.h>
34 #include <pulse/xmalloc.h>
35 #include <pulse/timeval.h>
36 #include <pulse/util.h>
37 #include <pulse/rtclock.h>
38 #include <pulse/internal.h>
40 #include <pulsecore/i18n.h>
41 #include <pulsecore/sink-input.h>
42 #include <pulsecore/namereg.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/mix.h>
46 #include <pulsecore/core-subscribe.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/play-memblockq.h>
50 #include <pulsecore/flist.h>
54 #define MAX_MIX_CHANNELS 32
55 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
56 #define ABSOLUTE_MIN_LATENCY (500)
57 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
58 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* NOTE(review): this listing is missing original source lines throughout
 * (the embedded line numbers jump); struct bodies and prototypes below are
 * shown truncated — confirm against the full source before editing. */
/* Declares pa_sink as a public object class derived from pa_msgobject. */
60 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
/* Linked-list entry for a deferred volume change (body truncated here). */
62 struct pa_sink_volume_change {
66 PA_LLIST_FIELDS(pa_sink_volume_change);
/* Message payload for setting a port (fields truncated here; a .port and
 * .ret member are used later in pa_sink_suspend()). */
69 struct sink_message_set_port {
/* Forward declarations of file-local helpers defined elsewhere in the file. */
74 static void sink_free(pa_object *s);
76 static void pa_sink_volume_change_push(pa_sink *s);
77 static void pa_sink_volume_change_flush(pa_sink *s);
78 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/* Initialize a pa_sink_new_data: allocates a fresh property list and an
 * empty name->port hashmap whose values are unreffed on removal.
 * NOTE(review): leading asserts/memset and the return are missing from
 * this listing. */
80 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
84 data->proplist = pa_proplist_new();
85 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
/* Set the sink name on the new-data struct (takes a private copy).
 * NOTE(review): the pa_xfree() of any previous name is not visible in this
 * truncated listing — confirm it exists upstream. */
90 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
94 data->name = pa_xstrdup(name);
/* Set the sample spec; a NULL spec clears the "is set" flag instead. */
97 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
100 if ((data->sample_spec_is_set = !!spec))
101 data->sample_spec = *spec;
/* Set the channel map; a NULL map clears the "is set" flag instead. */
104 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
107 if ((data->channel_map_is_set = !!map))
108 data->channel_map = *map;
/* Record an alternate sample rate and mark it as explicitly set. */
111 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
114 data->alternate_sample_rate_is_set = true;
115 data->alternate_sample_rate = alternate_sample_rate;
/* Set the initial volume; a NULL volume clears the "is set" flag instead. */
118 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
121 if ((data->volume_is_set = !!volume))
122 data->volume = *volume;
/* Record the initial mute state and mark it as explicitly set.
 * The !! normalizes any nonzero value to true. */
125 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
128 data->muted_is_set = true;
129 data->muted = !!mute;
/* Set (replace) the requested active port name; frees any previous copy.
 * pa_xstrdup(NULL) yields NULL, so a NULL port clears the field. */
132 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
135 pa_xfree(data->active_port);
136 data->active_port = pa_xstrdup(port);
/* Release everything owned by a pa_sink_new_data.
 * NOTE(review): the NULL guards around proplist/ports frees are not visible
 * in this truncated listing — confirm against the full source. */
139 void pa_sink_new_data_done(pa_sink_new_data *data) {
142 pa_proplist_free(data->proplist);
145 pa_hashmap_free(data->ports);
147 pa_xfree(data->name);
148 pa_xfree(data->active_port);
151 /* Called from main context */
/* Clear every implementor-supplied callback on the sink so a freshly
 * created sink starts with no hardware hooks installed.
 * NOTE(review): some callback fields (e.g. set_state, set_mute) are not
 * visible in this truncated listing but are likely also reset. */
152 static void reset_callbacks(pa_sink *s) {
156 s->get_volume = NULL;
157 s->set_volume = NULL;
158 s->write_volume = NULL;
161 s->request_rewind = NULL;
162 s->update_requested_latency = NULL;
164 s->get_formats = NULL;
165 s->set_formats = NULL;
166 s->update_rate = NULL;
169 /* Called from main context */
/* Construct a new pa_sink from the fully-populated new-data struct:
 * registers the name, fires the NEW/FIXATE hooks, validates sample spec /
 * channel map / volume, initializes main- and IO-thread state, and creates
 * the companion ".monitor" source.
 * NOTE(review): many lines (error-path returns, closing braces, some
 * assignments) are missing from this truncated listing. */
170 pa_sink* pa_sink_new(
172 pa_sink_new_data *data,
173 pa_sink_flags_t flags) {
177 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
178 pa_source_new_data source_data;
184 pa_assert(data->name);
185 pa_assert_ctl_context();
187 s = pa_msgobject_new(pa_sink);
/* Reserve the sink name in the name registry; fails if already taken. */
189 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
190 pa_log_debug("Failed to register name %s.", data->name);
195 pa_sink_new_data_set_name(data, name);
/* Let modules veto or adjust the new sink before validation. */
197 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
199 pa_namereg_unregister(core, name);
203 /* FIXME, need to free s here on failure */
/* Validate the (possibly hook-modified) new-data fields. */
205 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
206 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
208 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
210 if (!data->channel_map_is_set)
211 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
213 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
214 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
216 /* FIXME: There should probably be a general function for checking whether
217 * the sink volume is allowed to be set, like there is for sink inputs. */
218 pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
220 if (!data->volume_is_set) {
221 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
222 data->save_volume = false;
225 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
226 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
228 if (!data->muted_is_set)
/* Inherit card properties, then fill in standard device properties. */
232 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
234 pa_device_init_description(data->proplist);
235 pa_device_init_icon(data->proplist, true);
236 pa_device_init_intended_roles(data->proplist);
/* Last chance for modules to fix up the data before the sink is built. */
238 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
240 pa_namereg_unregister(core, name);
244 s->parent.parent.free = sink_free;
245 s->parent.process_msg = pa_sink_process_msg;
248 s->state = PA_SINK_INIT;
251 s->suspend_cause = data->suspend_cause;
252 pa_sink_set_mixer_dirty(s, false);
253 s->name = pa_xstrdup(name);
254 s->proplist = pa_proplist_copy(data->proplist);
255 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
256 s->module = data->module;
257 s->card = data->card;
259 s->priority = pa_device_init_priority(s->proplist);
261 s->sample_spec = data->sample_spec;
262 s->channel_map = data->channel_map;
263 s->default_sample_rate = s->sample_spec.rate;
265 if (data->alternate_sample_rate_is_set)
266 s->alternate_sample_rate = data->alternate_sample_rate;
268 s->alternate_sample_rate = s->core->alternate_sample_rate;
/* An alternate rate equal to the default is useless; disable it. */
270 if (s->sample_spec.rate == s->alternate_sample_rate) {
271 pa_log_warn("Default and alternate sample rates are the same.");
272 s->alternate_sample_rate = 0;
275 s->inputs = pa_idxset_new(NULL, NULL);
277 s->input_to_master = NULL;
279 s->reference_volume = s->real_volume = data->volume;
280 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
281 s->base_volume = PA_VOLUME_NORM;
282 s->n_volume_steps = PA_VOLUME_NORM+1;
283 s->muted = data->muted;
284 s->refresh_volume = s->refresh_muted = false;
291 /* As a minor optimization we just steal the list instead of
293 s->ports = data->ports;
296 s->active_port = NULL;
297 s->save_port = false;
/* Prefer the explicitly requested port, if it exists in the port map. */
299 if (data->active_port)
300 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
301 s->save_port = data->save_port;
/* Otherwise fall back to the highest-priority available port. */
303 if (!s->active_port) {
307 PA_HASHMAP_FOREACH(p, s->ports, state)
308 if (!s->active_port || p->priority > s->active_port->priority)
313 s->latency_offset = s->active_port->latency_offset;
315 s->latency_offset = 0;
317 s->save_volume = data->save_volume;
318 s->save_muted = data->save_muted;
320 pa_silence_memchunk_get(
321 &core->silence_cache,
/* Initialize the IO-thread view of the sink state. */
327 s->thread_info.rtpoll = NULL;
328 s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
329 (pa_free_cb_t) pa_sink_input_unref);
330 s->thread_info.soft_volume = s->soft_volume;
331 s->thread_info.soft_muted = s->muted;
332 s->thread_info.state = s->state;
333 s->thread_info.rewind_nbytes = 0;
334 s->thread_info.rewind_requested = false;
335 s->thread_info.max_rewind = 0;
336 s->thread_info.max_request = 0;
337 s->thread_info.requested_latency_valid = false;
338 s->thread_info.requested_latency = 0;
339 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
340 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
341 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
343 PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
344 s->thread_info.volume_changes_tail = NULL;
345 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
346 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
347 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
348 s->thread_info.latency_offset = s->latency_offset;
350 /* FIXME: This should probably be moved to pa_sink_put() */
351 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
354 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
356 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
357 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
360 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
361 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Build the companion monitor source mirroring this sink's audio format
 * and latency capabilities. */
365 pa_source_new_data_init(&source_data);
366 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
367 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
368 pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
369 source_data.name = pa_sprintf_malloc("%s.monitor", name);
370 source_data.driver = data->driver;
371 source_data.module = data->module;
372 source_data.card = data->card;
374 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
375 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
376 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
378 s->monitor_source = pa_source_new(core, &source_data,
379 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
380 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
382 pa_source_new_data_done(&source_data);
384 if (!s->monitor_source) {
390 s->monitor_source->monitor_of = s;
392 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
393 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
394 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
399 /* Called from main context */
/* Transition the sink to a new state: notifies the implementor via
 * s->set_state(), synchronously informs the IO thread, fires the
 * STATE_CHANGED hook/subscription event, and on suspend/resume notifies
 * inputs and the monitor source. Rolls back set_state() if the IO-thread
 * message fails.
 * NOTE(review): several lines (return statements, the suspend_change
 * declaration, closing braces) are missing from this truncated listing. */
400 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
403 pa_sink_state_t original_state;
406 pa_assert_ctl_context();
408 if (s->state == state)
411 original_state = s->state;
/* True when moving between SUSPENDED and an opened state, either way. */
414 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
415 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
418 if ((ret = s->set_state(s, state)) < 0)
/* Tell the IO thread; on failure, undo the implementor-side change. */
422 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
425 s->set_state(s, original_state);
432 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
433 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
434 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
437 if (suspend_change) {
441 /* We're suspending or resuming, tell everyone about it */
443 PA_IDXSET_FOREACH(i, s->inputs, idx)
444 if (s->state == PA_SINK_SUSPENDED &&
445 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
446 pa_sink_input_kill(i);
448 i->suspend(i, state == PA_SINK_SUSPENDED);
450 if (s->monitor_source)
451 pa_source_sync_suspend(s->monitor_source);
/* Install the implementor's get_volume callback (body truncated in this
 * listing). */
457 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Install (or clear) the implementor's set_volume callback and keep the
 * HW_VOLUME_CTRL flag in sync; re-derives decibel-volume support and, if
 * the flags changed after init, posts a change event to clients. */
463 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
464 pa_sink_flags_t flags;
/* write_volume requires set_volume, so cb may not be cleared while a
 * write_volume callback is still installed. */
467 pa_assert(!s->write_volume || cb);
471 /* Save the current flags so we can tell if they've changed */
475 /* The sink implementor is responsible for setting decibel volume support */
476 s->flags |= PA_SINK_HW_VOLUME_CTRL;
478 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
479 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
480 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
483 /* If the flags have changed after init, let any clients know via a change event */
484 if (s->state != PA_SINK_INIT && flags != s->flags)
485 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install (or clear) the deferred write_volume callback and keep the
 * DEFERRED_VOLUME flag in sync; posts a change event if flags changed
 * after init. A write_volume callback is only valid with set_volume. */
488 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
489 pa_sink_flags_t flags;
492 pa_assert(!cb || s->set_volume);
494 s->write_volume = cb;
496 /* Save the current flags so we can tell if they've changed */
500 s->flags |= PA_SINK_DEFERRED_VOLUME;
502 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
504 /* If the flags have changed after init, let any clients know via a change event */
505 if (s->state != PA_SINK_INIT && flags != s->flags)
506 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install the implementor's get_mute callback (body truncated in this
 * listing). */
509 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Install (or clear) the implementor's set_mute callback and keep the
 * HW_MUTE_CTRL flag in sync; posts a change event if flags changed after
 * init. */
515 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
516 pa_sink_flags_t flags;
522 /* Save the current flags so we can tell if they've changed */
526 s->flags |= PA_SINK_HW_MUTE_CTRL;
528 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
530 /* If the flags have changed after init, let any clients know via a change event */
531 if (s->state != PA_SINK_INIT && flags != s->flags)
532 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable/disable PA_SINK_FLAT_VOLUME, gated by the global flat_volumes
 * user preference; posts a change event if the flags changed after init. */
535 static void enable_flat_volume(pa_sink *s, bool enable) {
536 pa_sink_flags_t flags;
540 /* Always follow the overall user preference here */
541 enable = enable && s->core->flat_volumes;
543 /* Save the current flags so we can tell if they've changed */
547 s->flags |= PA_SINK_FLAT_VOLUME;
549 s->flags &= ~PA_SINK_FLAT_VOLUME;
551 /* If the flags have changed after init, let any clients know via a change event */
552 if (s->state != PA_SINK_INIT && flags != s->flags)
553 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable/disable PA_SINK_DECIBEL_VOLUME; flat volume follows decibel
 * support (enabled together, disabled together). Posts a change event if
 * the flags changed after init. */
556 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
557 pa_sink_flags_t flags;
561 /* Save the current flags so we can tell if they've changed */
565 s->flags |= PA_SINK_DECIBEL_VOLUME;
566 enable_flat_volume(s, true);
568 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
569 enable_flat_volume(s, false);
572 /* If the flags have changed after init, let any clients know via a change event */
573 if (s->state != PA_SINK_INIT && flags != s->flags)
574 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
577 /* Called from main context */
/* Finalize sink construction after the implementor filled in callbacks and
 * flags: verifies the flag/callback invariants, resolves decibel and flat
 * volume support, copies volumes for volume-sharing filter sinks, moves the
 * sink to SUSPENDED or IDLE, puts the monitor source, and announces the new
 * sink via subscription event and PUT hook. */
578 void pa_sink_put(pa_sink* s) {
579 pa_sink_assert_ref(s);
580 pa_assert_ctl_context();
582 pa_assert(s->state == PA_SINK_INIT);
583 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || s->input_to_master);
585 /* The following fields must be initialized properly when calling _put() */
586 pa_assert(s->asyncmsgq);
587 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
589 /* Generally, flags should be initialized via pa_sink_new(). As a
590 * special exception we allow some volume related flags to be set
591 * between _new() and _put() by the callback setter functions above.
593 * Thus we implement a couple safeguards here which ensure the above
594 * setters were used (or at least the implementor made manual changes
595 * in a compatible way).
597 * Note: All of these flags set here can change over the life time
599 pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
600 pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
601 pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
603 /* XXX: Currently decibel volume is disabled for all sinks that use volume
604 * sharing. When the master sink supports decibel volume, it would be good
605 * to have the flag also in the filter sink, but currently we don't do that
606 * so that the flags of the filter sink never change when it's moved from
607 * a master sink to another. One solution for this problem would be to
608 * remove user-visible volume altogether from filter sinks when volume
609 * sharing is used, but the current approach was easier to implement... */
610 /* We always support decibel volumes in software, otherwise we leave it to
611 * the sink implementor to set this flag as needed.
613 * Note: This flag can also change over the life time of the sink. */
614 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
615 pa_sink_enable_decibel_volume(s, true);
617 /* If the sink implementor support DB volumes by itself, we should always
618 * try and enable flat volumes too */
619 if ((s->flags & PA_SINK_DECIBEL_VOLUME))
620 enable_flat_volume(s, true);
/* Volume-sharing filter sinks mirror the master sink's volumes, remapped
 * to this sink's channel map. */
622 if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
623 pa_sink *root_sink = pa_sink_get_master(s);
625 pa_assert(root_sink);
627 s->reference_volume = root_sink->reference_volume;
628 pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
630 s->real_volume = root_sink->real_volume;
631 pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
633 /* We assume that if the sink implementor changed the default
634 * volume he did so in real_volume, because that is the usual
635 * place where he is supposed to place his changes. */
636 s->reference_volume = s->real_volume;
638 s->thread_info.soft_volume = s->soft_volume;
639 s->thread_info.soft_muted = s->muted;
640 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
/* Sanity-check the flag/latency/monitor invariants before going live. */
642 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
643 || (s->base_volume == PA_VOLUME_NORM
644 && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
645 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
646 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
647 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
648 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
650 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
651 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
652 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
/* Start suspended if a suspend cause is already pending, else idle. */
654 if (s->suspend_cause)
655 pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED) == 0);
657 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
659 pa_source_put(s->monitor_source);
661 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
662 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
665 /* Called from main context */
/* Disconnect the sink from the core: fires the UNLINK hook, removes it
 * from the name registry and core/card idxsets, kills all remaining
 * inputs, moves it to UNLINKED, unlinks the monitor source, and announces
 * the removal. Idempotent, and also undoes registrations from
 * pa_sink_new(), not just pa_sink_put(). */
666 void pa_sink_unlink(pa_sink* s) {
668 pa_sink_input *i, *j = NULL;
671 pa_assert_ctl_context();
673 /* Please note that pa_sink_unlink() does more than simply
674 * reversing pa_sink_put(). It also undoes the registrations
675 * already done in pa_sink_new()! */
677 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
678 * may be called multiple times on the same sink without bad
681 linked = PA_SINK_IS_LINKED(s->state);
684 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
686 if (s->state != PA_SINK_UNLINKED)
687 pa_namereg_unregister(s->core, s->name);
688 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
691 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill inputs until the set is empty. NOTE(review): the loop's assertion
 * guarding against repeated heads (via j) is truncated in this listing. */
693 while ((i = pa_idxset_first(s->inputs, NULL))) {
695 pa_sink_input_kill(i);
700 sink_set_state(s, PA_SINK_UNLINKED);
702 s->state = PA_SINK_UNLINKED;
706 if (s->monitor_source)
707 pa_source_unlink(s->monitor_source);
710 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
711 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
715 /* Called from main context */
/* Destructor invoked when the last reference is dropped (installed as
 * parent.parent.free in pa_sink_new); unlinks if still linked, then frees
 * the monitor source, input containers, silence memblock, proplist and
 * port map. NOTE(review): frees of name/driver and the volume-change list
 * flush are not visible in this truncated listing. */
716 static void sink_free(pa_object *o) {
717 pa_sink *s = PA_SINK(o);
720 pa_assert_ctl_context();
721 pa_assert(pa_sink_refcnt(s) == 0);
723 if (PA_SINK_IS_LINKED(s->state))
726 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
728 if (s->monitor_source) {
729 pa_source_unref(s->monitor_source);
730 s->monitor_source = NULL;
733 pa_idxset_free(s->inputs, NULL);
734 pa_hashmap_free(s->thread_info.inputs);
736 if (s->silence.memblock)
737 pa_memblock_unref(s->silence.memblock);
743 pa_proplist_free(s->proplist);
746 pa_hashmap_free(s->ports);
751 /* Called from main context, and not while the IO thread is active, please */
/* Attach the async message queue used to talk to the IO thread, and
 * propagate it to the monitor source. */
752 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
753 pa_sink_assert_ref(s);
754 pa_assert_ctl_context();
758 if (s->monitor_source)
759 pa_source_set_asyncmsgq(s->monitor_source, q);
762 /* Called from main context, and not while the IO thread is active, please */
/* Change a restricted subset of sink flags (only LATENCY and
 * DYNAMIC_LATENCY are allowed in the mask): logs each toggled flag, posts
 * a change event, fires FLAGS_CHANGED, mirrors the change onto the monitor
 * source, and recurses into any filter sinks originating from our inputs. */
763 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
764 pa_sink_flags_t old_flags;
765 pa_sink_input *input;
768 pa_sink_assert_ref(s);
769 pa_assert_ctl_context();
771 /* For now, allow only a minimal set of flags to be changed. */
772 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
774 old_flags = s->flags;
775 s->flags = (s->flags & ~mask) | (value & mask);
777 if (s->flags == old_flags)
780 if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
781 pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
783 if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
784 pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
785 s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
787 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
788 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
790 if (s->monitor_source)
791 pa_source_update_flags(s->monitor_source,
792 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
793 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
794 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
795 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
/* Filter sinks layered on top of this one must follow the same flags. */
797 PA_IDXSET_FOREACH(input, s->inputs, idx) {
798 if (input->origin_sink)
799 pa_sink_update_flags(input->origin_sink, mask, value);
803 /* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object the IO thread runs on, and propagate it to the
 * monitor source. */
804 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
805 pa_sink_assert_ref(s);
806 pa_sink_assert_io_context(s);
808 s->thread_info.rtpoll = p;
810 if (s->monitor_source)
811 pa_source_set_rtpoll(s->monitor_source, p);
814 /* Called from main context */
/* Refresh the RUNNING/IDLE state based on whether anyone is using the
 * sink; a suspended sink is left alone. Returns sink_set_state()'s result.
 * NOTE(review): the early-return for the SUSPENDED case is truncated in
 * this listing. */
815 int pa_sink_update_status(pa_sink*s) {
816 pa_sink_assert_ref(s);
817 pa_assert_ctl_context();
818 pa_assert(PA_SINK_IS_LINKED(s->state));
820 if (s->state == PA_SINK_SUSPENDED)
823 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
826 /* Called from any context - must be threadsafe */
/* Atomically flag whether the hardware mixer settings need re-applying
 * once the mixer becomes accessible again (see pa_sink_suspend()). */
827 void pa_sink_set_mixer_dirty(pa_sink *s, bool is_dirty) {
828 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
831 /* Called from main context */
/* Add or remove a suspend cause on the sink (and its monitor source). If
 * resuming makes the mixer accessible again while it was flagged dirty,
 * re-applies the active port (via IO-thread message when deferred volume
 * is in use, directly otherwise). Finally moves the sink to SUSPENDED, or
 * to RUNNING/IDLE depending on usage. */
832 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
833 pa_sink_assert_ref(s);
834 pa_assert_ctl_context();
835 pa_assert(PA_SINK_IS_LINKED(s->state));
836 pa_assert(cause != 0);
839 s->suspend_cause |= cause;
840 s->monitor_source->suspend_cause |= cause;
842 s->suspend_cause &= ~cause;
843 s->monitor_source->suspend_cause &= ~cause;
846 if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
847 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
848 it'll be handled just fine. */
849 pa_sink_set_mixer_dirty(s, false);
850 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
851 if (s->active_port && s->set_port) {
852 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
853 struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
854 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
857 s->set_port(s, s->active_port);
/* No state change needed if we are already in the state the causes imply. */
867 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
870 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
872 if (s->suspend_cause)
873 return sink_set_state(s, PA_SINK_SUSPENDED);
875 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
878 /* Called from main context */
/* Begin moving all inputs away from this sink: for each input that starts
 * its move successfully, keep a reference (pushed onto queue q, per the
 * function's contract — the push itself is truncated in this listing);
 * inputs that refuse to move are unreffed and left in place. Returns q. */
879 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
880 pa_sink_input *i, *n;
883 pa_sink_assert_ref(s);
884 pa_assert_ctl_context();
885 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the successor before the move, since a successful start_move
 * removes i from s->inputs. */
890 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
891 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
893 pa_sink_input_ref(i);
895 if (pa_sink_input_start_move(i) >= 0)
898 pa_sink_input_unref(i);
904 /* Called from main context */
/* Complete a bulk move: pop each queued input and finish its move onto
 * sink s (failing it back if finish fails), drop the reference taken in
 * pa_sink_move_all_start(), then free the queue. */
905 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
908 pa_sink_assert_ref(s);
909 pa_assert_ctl_context();
910 pa_assert(PA_SINK_IS_LINKED(s->state));
913 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
914 if (pa_sink_input_finish_move(i, s, save) < 0)
915 pa_sink_input_fail_move(i);
917 pa_sink_input_unref(i);
920 pa_queue_free(q, NULL);
923 /* Called from main context */
/* Abort a bulk move: fail every queued input back to where it came from,
 * drop the references, and free the queue. */
924 void pa_sink_move_all_fail(pa_queue *q) {
927 pa_assert_ctl_context();
930 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
931 pa_sink_input_fail_move(i);
932 pa_sink_input_unref(i);
935 pa_queue_free(q, NULL);
938 /* Called from IO thread context */
/* Scan all inputs for underruns relative to left_to_play and return how
 * much of the playback buffer remains safe to keep (left_to_play minus the
 * largest relevant underrun offset).
 * NOTE(review): several lines (the result declaration/initialization, the
 * continue/assignment bodies, the final return) are truncated in this
 * listing — verify the exact control flow against the full source. */
939 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
944 pa_sink_assert_ref(s);
945 pa_sink_assert_io_context(s);
947 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
948 size_t uf = i->thread_info.underrun_for_sink;
951 if (uf >= left_to_play) {
952 if (pa_sink_input_process_underrun(i))
955 else if (uf > result)
960 pa_log_debug("Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", (long) result, (long) left_to_play - result);
961 return left_to_play - result;
964 /* Called from IO thread context */
/* Execute a rewind of nbytes: clears the pending request, rewinds the
 * deferred hardware volume timeline when applicable, propagates the rewind
 * to every input, and finally to the monitor source. Skipped entirely when
 * no rewind was requested and nbytes is zero. */
965 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
969 pa_sink_assert_ref(s);
970 pa_sink_assert_io_context(s);
971 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
973 /* If nobody requested this and this is actually no real rewind
974 * then we can short cut this. Please note that this means that
975 * not all rewind requests triggered upstream will always be
976 * translated in actual requests! */
977 if (!s->thread_info.rewind_requested && nbytes <= 0)
980 s->thread_info.rewind_nbytes = 0;
981 s->thread_info.rewind_requested = false;
984 pa_log_debug("Processing rewind...");
985 if (s->flags & PA_SINK_DEFERRED_VOLUME)
986 pa_sink_volume_change_rewind(s, nbytes);
989 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
990 pa_sink_input_assert_ref(i);
991 pa_sink_input_process_rewind(i, nbytes);
995 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
996 pa_source_process_rewind(s->monitor_source, nbytes);
1000 /* Called from IO thread context */
/* Peek up to maxinfo inputs into the pa_mix_info array: each non-silence
 * chunk is recorded with its volume and a reference on the input; *length
 * is shrunk to the smallest chunk length seen (mixlength). Returns the
 * number of entries filled (return statement truncated in this listing). */
1001 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1005 size_t mixlength = *length;
1007 pa_sink_assert_ref(s);
1008 pa_sink_assert_io_context(s);
1011 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1012 pa_sink_input_assert_ref(i);
1014 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
1016 if (mixlength == 0 || info->chunk.length < mixlength)
1017 mixlength = info->chunk.length;
/* Pure silence contributes nothing to the mix; drop it early. */
1019 if (pa_memblock_is_silence(info->chunk.memblock)) {
1020 pa_memblock_unref(info->chunk.memblock);
/* Keep a ref so the input stays alive until inputs_drop(). */
1024 info->userdata = pa_sink_input_ref(i);
1026 pa_assert(info->chunk.memblock);
1027 pa_assert(info->chunk.length > 0);
1035 *length = mixlength;
1040 /* Called from IO thread context */
/* After mixing: drop result->length bytes from every input, feed each
 * input's rendered (pre-sink-volume) data to any direct outputs on the
 * monitor source, release the chunk references and input references taken
 * by fill_mix_info(), and post the mixed result to the monitor source.
 * NOTE(review): several lines (the info-index p handling, the c memchunk
 * declaration/assignments, loop braces) are truncated in this listing. */
1041 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1045 unsigned n_unreffed = 0;
1047 pa_sink_assert_ref(s);
1048 pa_sink_assert_io_context(s);
1050 pa_assert(result->memblock);
1051 pa_assert(result->length > 0);
1053 /* We optimize for the case where the order of the inputs has not changed */
1055 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1057 pa_mix_info* m = NULL;
1059 pa_sink_input_assert_ref(i);
1061 /* Let's try to find the matching entry info the pa_mix_info array */
1062 for (j = 0; j < n; j ++) {
1064 if (info[p].userdata == i) {
1074 /* Drop read data */
1075 pa_sink_input_drop(i, result->length);
1077 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
1079 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1080 void *ostate = NULL;
1081 pa_source_output *o;
/* Use the chunk captured at peek time, volume-adjusted, so direct
 * outputs get this input's own contribution. */
1084 if (m && m->chunk.memblock) {
1086 pa_memblock_ref(c.memblock);
1087 pa_assert(result->length <= c.length);
1088 c.length = result->length;
1090 pa_memchunk_make_writable(&c, 0);
1091 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
1094 pa_memblock_ref(c.memblock);
1095 pa_assert(result->length <= c.length);
1096 c.length = result->length;
1099 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1100 pa_source_output_assert_ref(o);
1101 pa_assert(o->direct_on_input == i);
1102 pa_source_post_direct(s->monitor_source, o, &c);
1105 pa_memblock_unref(c.memblock);
/* Release this entry's chunk and input reference. */
1110 if (m->chunk.memblock) {
1111 pa_memblock_unref(m->chunk.memblock);
1112 pa_memchunk_reset(&m->chunk);
1115 pa_sink_input_unref(m->userdata);
1122 /* Now drop references to entries that are included in the
1123 * pa_mix_info array but don't exist anymore */
1125 if (n_unreffed < n) {
1126 for (; n > 0; info++, n--) {
1128 pa_sink_input_unref(info->userdata);
1129 if (info->chunk.memblock)
1130 pa_memblock_unref(info->chunk.memblock);
1134 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1135 pa_source_post(s->monitor_source, result);
1138 /* Called from IO thread context */
/* Render up to `length` bytes of mixed audio into *result. Suspended sinks
 * return silence; zero inputs return silence; one input is passed through
 * with soft volume/mute applied (copy-on-write only when needed); multiple
 * inputs are mixed with pa_mix(). Ends by dropping the rendered bytes from
 * all inputs via inputs_drop().
 * NOTE(review): some lines (n declaration, the n==0/else branch headers,
 * silence-get arguments, the final return) are truncated in this listing. */
1139 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1140 pa_mix_info info[MAX_MIX_CHANNELS];
1142 size_t block_size_max;
1144 pa_sink_assert_ref(s);
1145 pa_sink_assert_io_context(s);
1146 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1147 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1150 pa_assert(!s->thread_info.rewind_requested);
1151 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: hand out cached silence without touching the inputs. */
1153 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1154 result->memblock = pa_memblock_ref(s->silence.memblock);
1155 result->index = s->silence.index;
1156 result->length = PA_MIN(s->silence.length, length);
/* Clamp the request to a frame-aligned, mempool-supported size. */
1163 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1165 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1166 if (length > block_size_max)
1167 length = pa_frame_align(block_size_max, &s->sample_spec);
1169 pa_assert(length > 0);
1171 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* No inputs: return (a slice of) the silence chunk. */
1175 *result = s->silence;
1176 pa_memblock_ref(result->memblock);
1178 if (result->length > length)
1179 result->length = length;
1181 } else if (n == 1) {
/* Single input: reuse its chunk, applying soft volume/mute lazily. */
1184 *result = info[0].chunk;
1185 pa_memblock_ref(result->memblock);
1187 if (result->length > length)
1188 result->length = length;
1190 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1192 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1193 pa_memblock_unref(result->memblock);
1194 pa_silence_memchunk_get(&s->core->silence_cache,
1199 } else if (!pa_cvolume_is_norm(&volume)) {
1200 pa_memchunk_make_writable(result, 0);
1201 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* Multiple inputs: allocate a fresh block and mix into it. */
1205 result->memblock = pa_memblock_new(s->core->mempool, length);
1207 ptr = pa_memblock_acquire(result->memblock);
1208 result->length = pa_mix(info, n,
1211 &s->thread_info.soft_volume,
1212 s->thread_info.soft_muted);
1213 pa_memblock_release(result->memblock);
1218 inputs_drop(s, info, n, result);
1223 /* Called from IO thread context */
/* Like pa_sink_render(), but mixes into a caller-provided memchunk 'target'
 * instead of allocating a new one. May shorten target->length if less data
 * is available than requested.
 * NOTE(review): this chunk is gap-sampled; some original lines are elided. */
1224 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1225 pa_mix_info info[MAX_MIX_CHANNELS];
1227 size_t length, block_size_max;
1229 pa_sink_assert_ref(s);
1230 pa_sink_assert_io_context(s);
1231 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1233 pa_assert(target->memblock);
1234 pa_assert(target->length > 0);
1235 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
/* Rendering while a rewind is pending would produce stale data. */
1237 pa_assert(!s->thread_info.rewind_requested);
1238 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: overwrite the target with silence. */
1240 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1241 pa_silence_memchunk(target, &s->sample_spec);
1247 length = target->length;
/* Clamp to the mempool's maximum block size, keeping frame alignment. */
1248 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1249 if (length > block_size_max)
1250 length = pa_frame_align(block_size_max, &s->sample_spec);
1252 pa_assert(length > 0);
/* Collect chunks + volumes of all active inputs; n is the input count. */
1254 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: nothing to mix, fill target with silence. */
1257 if (target->length > length)
1258 target->length = length;
1260 pa_silence_memchunk(target, &s->sample_spec);
1261 } else if (n == 1) {
/* Single input: copy its (volume-adjusted) data into the target. */
1264 if (target->length > length)
1265 target->length = length;
/* Combined software volume = sink soft volume * input volume. */
1267 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1269 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1270 pa_silence_memchunk(target, &s->sample_spec);
1274 vchunk = info[0].chunk;
1275 pa_memblock_ref(vchunk.memblock);
1277 if (vchunk.length > length)
1278 vchunk.length = length;
/* Non-unity volume: copy-on-write the temp chunk, then scale it. */
1280 if (!pa_cvolume_is_norm(&volume)) {
1281 pa_memchunk_make_writable(&vchunk, 0);
1282 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1285 pa_memchunk_memcpy(target, &vchunk);
1286 pa_memblock_unref(vchunk.memblock);
/* General case: mix all inputs directly into the target's memory. */
1292 ptr = pa_memblock_acquire(target->memblock);
1294 target->length = pa_mix(info, n,
1295 (uint8_t*) ptr + target->index, length,
1297 &s->thread_info.soft_volume,
1298 s->thread_info.soft_muted);
1300 pa_memblock_release(target->memblock);
/* Consume the rendered bytes from each input and drop our temp references. */
1303 inputs_drop(s, info, n, target);
1308 /* Called from IO thread context */
/* Fill the target memchunk completely, calling pa_sink_render_into()
 * repeatedly until the whole requested span is rendered.
 * NOTE(review): this chunk is gap-sampled; the loop body and tail of this
 * function are elided from the visible source. */
1309 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1313 pa_sink_assert_ref(s);
1314 pa_sink_assert_io_context(s);
1315 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1317 pa_assert(target->memblock);
1318 pa_assert(target->length > 0);
1319 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
/* Rendering while a rewind is pending would produce stale data. */
1321 pa_assert(!s->thread_info.rewind_requested);
1322 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: the whole target becomes silence, no iteration needed. */
1324 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1325 pa_silence_memchunk(target, &s->sample_spec);
/* Render the next sub-span of the target. */
1338 pa_sink_render_into(s, &chunk);
1347 /* Called from IO thread context */
/* Render exactly 'length' bytes into 'result': first a normal render, then,
 * if that produced less than requested, top up the remainder in place via
 * pa_sink_render_into_full(). */
1348 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1349 pa_sink_assert_ref(s);
1350 pa_sink_assert_io_context(s);
1351 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1352 pa_assert(length > 0);
1353 pa_assert(pa_frame_aligned(length, &s->sample_spec));
/* Rendering while a rewind is pending would produce stale data. */
1356 pa_assert(!s->thread_info.rewind_requested);
1357 pa_assert(s->thread_info.rewind_nbytes == 0);
1361 pa_sink_render(s, length, result);
/* Short render: grow the block to 'length' and render the missing tail. */
1363 if (result->length < length) {
1366 pa_memchunk_make_writable(result, length);
/* 'chunk' aliases the unfilled tail of result's memblock. */
1368 chunk.memblock = result->memblock;
1369 chunk.index = result->index + result->length;
1370 chunk.length = length - result->length;
1372 pa_sink_render_into_full(s, &chunk);
1374 result->length = length;
1380 /* Called from main thread */
/* Try to switch the sink to a new sample rate. Picks between the configured
 * default and alternate rates (or the exact stream rate in passthrough mode),
 * refuses while the sink or its monitor source is RUNNING, and suspends the
 * sink around the actual rate change.
 * NOTE(review): this chunk is gap-sampled; some branches/returns are elided. */
1381 int pa_sink_update_rate(pa_sink *s, uint32_t rate, bool passthrough) {
1383 uint32_t desired_rate = rate;
1384 uint32_t default_rate = s->default_sample_rate;
1385 uint32_t alternate_rate = s->alternate_sample_rate;
1388 bool use_alternate = false;
/* Already at the requested rate: nothing to do. */
1390 if (rate == s->sample_spec.rate)
/* Sink implementor provides no rate-switch callback: cannot update. */
1393 if (!s->update_rate)
1396 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough)) {
1397 pa_log_debug("Default and alternate sample rates are the same.");
/* A running sink cannot be re-clocked without glitches; keep current rate. */
1401 if (PA_SINK_IS_RUNNING(s->state)) {
1402 pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
1403 s->sample_spec.rate);
1407 if (s->monitor_source) {
1408 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1409 pa_log_info("Cannot update rate, monitor source is RUNNING");
/* Reject rates outside the supported range. */
1414 if (PA_UNLIKELY (desired_rate < 8000 ||
1415 desired_rate > PA_RATE_MAX))
/* All configured rates must be from the 4000- or 11025-multiple families. */
1419 pa_assert((default_rate % 4000 == 0) || (default_rate % 11025 == 0));
1420 pa_assert((alternate_rate % 4000 == 0) || (alternate_rate % 11025 == 0));
/* Prefer the rate family (44.1k vs 48k based) matching the request. */
1422 if (default_rate % 11025 == 0) {
1423 if ((alternate_rate % 4000 == 0) && (desired_rate % 4000 == 0))
1426 /* default is 4000 multiple */
1427 if ((alternate_rate % 11025 == 0) && (desired_rate % 11025 == 0))
1432 desired_rate = alternate_rate;
1434 desired_rate = default_rate;
1436 desired_rate = rate; /* use stream sampling rate, discard default/alternate settings */
1439 if (desired_rate == s->sample_spec.rate)
/* Don't disturb other active (non-passthrough) streams with a rate change. */
1442 if (!passthrough && pa_sink_used_by(s) > 0)
1445 pa_log_debug("Suspending sink %s due to changing the sample rate.", s->name);
1446 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1448 if (s->update_rate(s, desired_rate) >= 0) {
1449 /* update monitor source as well */
1450 if (s->monitor_source && !passthrough)
1451 pa_source_update_rate(s->monitor_source, desired_rate, false);
1452 pa_log_info("Changed sampling rate successfully");
/* Re-negotiate resamplers of corked inputs so they match the new rate. */
1454 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1455 if (i->state == PA_SINK_INPUT_CORKED)
1456 pa_sink_input_update_rate(i);
/* Resume the sink regardless of whether the rate change succeeded. */
1462 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1467 /* Called from main thread */
/* Query the sink's current latency by sending a synchronous message to the
 * IO thread, then apply the configured latency offset. Returns a value in
 * the time domain of the sound card. */
1468 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1471 pa_sink_assert_ref(s);
1472 pa_assert_ctl_context();
1473 pa_assert(PA_SINK_IS_LINKED(s->state));
1475 /* The returned value is supposed to be in the time domain of the sound card! */
/* Suspended sinks and sinks without latency reporting short-circuit here. */
1477 if (s->state == PA_SINK_SUSPENDED)
1480 if (!(s->flags & PA_SINK_LATENCY))
/* Synchronous round-trip to the IO thread to fetch the raw latency. */
1483 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1485 /* usec is unsigned, so check that the offset can be added to usec without
/* ... underflowing; a large negative offset is clamped instead of wrapping. */
1487 if (-s->latency_offset <= (int64_t) usec)
1488 usec += s->latency_offset;
1495 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): calls process_msg() directly
 * instead of a cross-thread message, and uses the thread_info copy of the
 * latency offset. */
1496 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1500 pa_sink_assert_ref(s);
1501 pa_sink_assert_io_context(s);
1502 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1504 /* The returned value is supposed to be in the time domain of the sound card! */
/* Suspended sinks and sinks without latency reporting short-circuit here. */
1506 if (s->thread_info.state == PA_SINK_SUSPENDED)
1509 if (!(s->flags & PA_SINK_LATENCY))
1512 o = PA_MSGOBJECT(s);
1514 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1516 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1519 /* usec is unsigned, so check that the offset can be added to usec without
/* ... underflowing; a large negative offset is clamped instead of wrapping. */
1521 if (-s->thread_info.latency_offset <= (int64_t) usec)
1522 usec += s->thread_info.latency_offset;
1529 /* Called from the main thread (and also from the IO thread while the main
1530 * thread is waiting).
1532 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1533 * set. Instead, flat volume mode is detected by checking whether the root sink
1534 * has the flag set. */
/* Returns whether flat-volume mode applies to this sink (tested on the root
 * of its volume-sharing chain). */
1535 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1536 pa_sink_assert_ref(s);
/* Walk up to the volume-sharing root; the flag only lives there. */
1538 s = pa_sink_get_master(s);
1541 return (s->flags & PA_SINK_FLAT_VOLUME);
1546 /* Called from the main thread (and also from the IO thread while the main
1547 * thread is waiting). */
/* Follow the chain of volume-sharing filter sinks upward and return the root
 * (master) sink. May return NULL if a sharing sink has no master input. */
1548 pa_sink *pa_sink_get_master(pa_sink *s) {
1549 pa_sink_assert_ref(s);
1551 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* A sharing sink without an input-to-master link is in a transient state. */
1552 if (PA_UNLIKELY(!s->input_to_master))
1555 s = s->input_to_master->sink;
1561 /* Called from main context */
/* Returns true if the sink is currently in passthrough mode, i.e. its single
 * connected input is a passthrough stream. */
1562 bool pa_sink_is_passthrough(pa_sink *s) {
1563 pa_sink_input *alt_i;
1566 pa_sink_assert_ref(s);
1568 /* one and only one PASSTHROUGH input can possibly be connected */
1569 if (pa_idxset_size(s->inputs) == 1) {
1570 alt_i = pa_idxset_first(s->inputs, &idx);
1572 if (pa_sink_input_is_passthrough(alt_i))
1579 /* Called from main context */
/* Prepare the sink for a passthrough stream: suspend the monitor source
 * (monitoring encoded data is useless) and force the volume to 0 dB,
 * remembering the previous volume for pa_sink_leave_passthrough(). */
1580 void pa_sink_enter_passthrough(pa_sink *s) {
1583 /* disable the monitor in passthrough mode */
1584 if (s->monitor_source) {
1585 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1586 pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1589 /* set the volume to NORM */
1590 s->saved_volume = *pa_sink_get_volume(s, true);
1591 s->saved_save_volume = s->save_volume;
/* Cap at base_volume so hardware is not driven above its 0 dB point. */
1593 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1594 pa_sink_set_volume(s, &volume, true, false);
1597 /* Called from main context */
/* Undo pa_sink_enter_passthrough(): resume the monitor source and restore
 * the volume that was saved when passthrough mode was entered. */
1598 void pa_sink_leave_passthrough(pa_sink *s) {
1599 /* Unsuspend monitor */
1600 if (s->monitor_source) {
1601 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1602 pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1605 /* Restore sink volume to what it was before we entered passthrough mode */
1606 pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
/* Clear the saved state so it cannot be restored twice. */
1608 pa_cvolume_init(&s->saved_volume);
1609 s->saved_save_volume = false;
1612 /* Called from main context. */
/* Recompute one input's reference ratio, the per-channel quotient of the
 * input's volume and its sink's reference volume (flat-volume mode only). */
1613 static void compute_reference_ratio(pa_sink_input *i) {
1615 pa_cvolume remapped;
1618 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1621 * Calculates the reference ratio from the sink's reference
1622 * volume. This basically calculates:
1624 * i->reference_ratio = i->volume / i->sink->reference_volume
/* Map the sink's reference volume into the input's channel map first. */
1627 remapped = i->sink->reference_volume;
1628 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1630 i->reference_ratio.channels = i->sample_spec.channels;
1632 for (c = 0; c < i->sample_spec.channels; c++) {
1634 /* We don't update when the sink volume is 0 anyway */
1635 if (remapped.values[c] <= PA_VOLUME_MUTED)
1638 /* Don't update the reference ratio unless necessary */
/* If ratio * sink volume already equals the input volume, the stored ratio
 * is still exact; recomputing would only add rounding error. */
1639 if (pa_sw_volume_multiply(
1640 i->reference_ratio.values[c],
1641 remapped.values[c]) == i->volume.values[c])
1644 i->reference_ratio.values[c] = pa_sw_volume_divide(
1645 i->volume.values[c],
1646 remapped.values[c]);
1650 /* Called from main context. Only called for the root sink in volume sharing
1651 * cases, except for internal recursive calls. */
/* Recompute reference ratios for every input of this sink, recursing into
 * volume-sharing filter sinks so the whole tree stays consistent. */
1652 static void compute_reference_ratios(pa_sink *s) {
1656 pa_sink_assert_ref(s);
1657 pa_assert_ctl_context();
1658 pa_assert(PA_SINK_IS_LINKED(s->state));
1659 pa_assert(pa_sink_flat_volume_enabled(s));
1661 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1662 compute_reference_ratio(i);
/* Descend into filter sinks that share volume with this sink. */
1664 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1665 compute_reference_ratios(i->origin_sink);
1669 /* Called from main context. Only called for the root sink in volume sharing
1670 * cases, except for internal recursive calls. */
/* Recompute each input's real ratio (input volume / sink real volume) and
 * the soft volume actually applied when mixing (real ratio * volume factor).
 * Recurses into volume-sharing filter sinks. */
1671 static void compute_real_ratios(pa_sink *s) {
1675 pa_sink_assert_ref(s);
1676 pa_assert_ctl_context();
1677 pa_assert(PA_SINK_IS_LINKED(s->state));
1678 pa_assert(pa_sink_flat_volume_enabled(s));
1680 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1682 pa_cvolume remapped;
1684 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1685 /* The origin sink uses volume sharing, so this input's real ratio
1686 * is handled as a special case - the real ratio must be 0 dB, and
1687 * as a result i->soft_volume must equal i->volume_factor. */
1688 pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1689 i->soft_volume = i->volume_factor;
1691 compute_real_ratios(i->origin_sink);
1697 * This basically calculates:
1699 * i->real_ratio := i->volume / s->real_volume
1700 * i->soft_volume := i->real_ratio * i->volume_factor
/* Map the sink's real volume into the input's channel map first. */
1703 remapped = s->real_volume;
1704 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1706 i->real_ratio.channels = i->sample_spec.channels;
1707 i->soft_volume.channels = i->sample_spec.channels;
1709 for (c = 0; c < i->sample_spec.channels; c++) {
/* Muted sink channel: division undefined, so just mute the soft volume. */
1711 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1712 /* We leave i->real_ratio untouched */
1713 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1717 /* Don't lose accuracy unless necessary */
/* Only recompute the ratio when the stored one no longer reproduces the
 * input volume exactly — avoids accumulating rounding error. */
1718 if (pa_sw_volume_multiply(
1719 i->real_ratio.values[c],
1720 remapped.values[c]) != i->volume.values[c])
1722 i->real_ratio.values[c] = pa_sw_volume_divide(
1723 i->volume.values[c],
1724 remapped.values[c]);
1726 i->soft_volume.values[c] = pa_sw_volume_multiply(
1727 i->real_ratio.values[c],
1728 i->volume_factor.values[c]);
1731 /* We don't copy the soft_volume to the thread_info data
1732 * here. That must be done by the caller */
/* Remap volume 'v' from channel map 'from' to 'to' while minimizing the
 * effect on other streams (see block comment below). 'template' is an
 * existing sink-side volume used as a candidate remapping. */
1736 static pa_cvolume *cvolume_remap_minimal_impact(
1738 const pa_cvolume *template,
1739 const pa_channel_map *from,
1740 const pa_channel_map *to) {
1745 pa_assert(template);
1748 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1749 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1751 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1752 * mapping from sink input to sink volumes:
1754 * If template is a possible remapping from v it is used instead
1755 * of remapping anew.
1757 * If the channel maps don't match we set an all-channel volume on
1758 * the sink to ensure that changing a volume on one stream has no
1759 * effect that cannot be compensated for in another stream that
1760 * does not have the same channel map as the sink. */
/* Identical maps: plain remap is exact, nothing clever needed. */
1762 if (pa_channel_map_equal(from, to))
/* If remapping the template back yields v, the template already represents
 * v in the target map — reuse it verbatim. */
1766 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
/* Fallback: flatten to a uniform volume at the maximum of v's channels. */
1771 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1775 /* Called from main thread. Only called for the root sink in volume sharing
1776 * cases, except for internal recursive calls. */
/* Merge the volumes of all inputs (recursing through volume-sharing filter
 * sinks) into *max_volume, expressed in the given channel map. */
1777 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1781 pa_sink_assert_ref(s);
1782 pa_assert(max_volume);
1783 pa_assert(channel_map);
1784 pa_assert(pa_sink_flat_volume_enabled(s));
1786 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1787 pa_cvolume remapped;
1789 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1790 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1792 /* Ignore this input. The origin sink uses volume sharing, so this
1793 * input's volume will be set to be equal to the root sink's real
1794 * volume. Obviously this input's current volume must not then
1795 * affect what the root sink's real volume will be. */
/* Remap with minimal cross-stream impact, then take the channel max. */
1799 remapped = i->volume;
1800 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1801 pa_cvolume_merge(max_volume, max_volume, &remapped);
1805 /* Called from main thread. Only called for the root sink in volume sharing
1806 * cases, except for internal recursive calls. */
/* Returns whether the sink (or any volume-sharing filter sink below it)
 * has at least one "real" input. */
1807 static bool has_inputs(pa_sink *s) {
1811 pa_sink_assert_ref(s);
1813 PA_IDXSET_FOREACH(i, s->inputs, idx) {
/* A non-sharing input counts directly; a sharing filter sink counts only
 * if it has inputs of its own. */
1814 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1821 /* Called from main thread. Only called for the root sink in volume sharing
1822 * cases, except for internal recursive calls. */
/* Store *new_volume (given in channel_map) as s->real_volume, and propagate
 * it down through volume-sharing filter sinks, updating their inputs'
 * volumes and notifying listeners of any change. */
1823 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1827 pa_sink_assert_ref(s);
1828 pa_assert(new_volume);
1829 pa_assert(channel_map);
/* Remap into this sink's own channel map before storing. */
1831 s->real_volume = *new_volume;
1832 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1834 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1835 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1836 if (pa_sink_flat_volume_enabled(s)) {
1837 pa_cvolume old_volume = i->volume;
1839 /* Follow the root sink's real volume. */
1840 i->volume = *new_volume;
1841 pa_cvolume_remap(&i->volume, channel_map, &i->channel_map);
1842 compute_reference_ratio(i);
1844 /* The volume changed, let's tell people so */
1845 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1846 if (i->volume_changed)
1847 i->volume_changed(i);
1849 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
/* Recurse into the sharing filter sink itself. */
1853 update_real_volume(i->origin_sink, new_volume, channel_map);
1858 /* Called from main thread. Only called for the root sink in shared volume
/* Determine the sink's real volume as the maximum over all input volumes,
 * store it (propagating through the sharing tree), then refresh the inputs'
 * real ratios / soft volumes to match. */
1860 static void compute_real_volume(pa_sink *s) {
1861 pa_sink_assert_ref(s);
1862 pa_assert_ctl_context();
1863 pa_assert(PA_SINK_IS_LINKED(s->state));
1864 pa_assert(pa_sink_flat_volume_enabled(s));
1865 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
1867 /* This determines the maximum volume of all streams and sets
1868 * s->real_volume accordingly. */
1870 if (!has_inputs(s)) {
1871 /* In the special case that we have no sink inputs we leave the
1872 * volume unmodified. */
1873 update_real_volume(s, &s->reference_volume, &s->channel_map);
/* Start from silence and merge every input volume in. */
1877 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1879 /* First let's determine the new maximum volume of all inputs
1880 * connected to this sink */
1881 get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
1882 update_real_volume(s, &s->real_volume, &s->channel_map);
1884 /* Then, let's update the real ratios/soft volumes of all inputs
1885 * connected to this sink */
1886 compute_real_ratios(s);
1889 /* Called from main thread. Only called for the root sink in shared volume
1890 * cases, except for internal recursive calls. */
/* After a sink-side reference volume change, recompute every input's volume
 * as reference_volume * reference_ratio and notify listeners. */
1891 static void propagate_reference_volume(pa_sink *s) {
1895 pa_sink_assert_ref(s);
1896 pa_assert_ctl_context();
1897 pa_assert(PA_SINK_IS_LINKED(s->state));
1898 pa_assert(pa_sink_flat_volume_enabled(s));
1900 /* This is called whenever the sink volume changes that is not
1901 * caused by a sink input volume change. We need to fix up the
1902 * sink input volumes accordingly */
1904 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1905 pa_cvolume old_volume;
1907 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1908 propagate_reference_volume(i->origin_sink);
1910 /* Since the origin sink uses volume sharing, this input's volume
1911 * needs to be updated to match the root sink's real volume, but
1912 * that will be done later in update_shared_real_volume(). */
1916 old_volume = i->volume;
1918 /* This basically calculates:
1920 * i->volume := s->reference_volume * i->reference_ratio */
1922 i->volume = s->reference_volume;
1923 pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
1924 pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);
1926 /* The volume changed, let's tell people so */
1927 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1929 if (i->volume_changed)
1930 i->volume_changed(i);
1932 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1937 /* Called from main thread. Only called for the root sink in volume sharing
1938 * cases, except for internal recursive calls. The return value indicates
1939 * whether any reference volume actually changed. */
/* Set the sink's reference volume from *v (given in channel_map), fire a
 * change event if it actually changed, and propagate the same volume to all
 * volume-sharing filter sinks below. 'save' marks the volume for persistent
 * storage. */
1940 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
1942 bool reference_volume_changed;
1946 pa_sink_assert_ref(s);
1947 pa_assert(PA_SINK_IS_LINKED(s->state));
1949 pa_assert(channel_map);
1950 pa_assert(pa_cvolume_valid(v));
/* Remap into this sink's own channel map before comparing/storing. */
1953 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
1955 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
1956 s->reference_volume = volume;
/* Keep the save flag sticky: only clear it when the volume changed without
 * an explicit request to save. */
1958 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
1960 if (reference_volume_changed)
1961 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1962 else if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1963 /* If the root sink's volume doesn't change, then there can't be any
1964 * changes in the other sinks in the sink tree either.
1966 * It's probably theoretically possible that even if the root sink's
1967 * volume changes slightly, some filter sink doesn't change its volume
1968 * due to rounding errors. If that happens, we still want to propagate
1969 * the changed root sink volume to the sinks connected to the
1970 * intermediate sink that didn't change its volume. This theoretical
1971 * possibility is the reason why we have that !(s->flags &
1972 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
1973 * notice even if we returned here false always if
1974 * reference_volume_changed is false. */
/* Recurse into sharing filter sinks with the original v/channel_map. */
1977 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1978 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1979 update_reference_volume(i->origin_sink, v, channel_map, false);
1985 /* Called from main thread */
/* Public entry point for setting a sink's volume. With a non-NULL 'volume'
 * the reference volume is set (and, in flat-volume mode, pushed down to the
 * inputs); with NULL the reference/real volumes are re-synchronized from the
 * current stream volumes. Refused while a passthrough input is connected,
 * except to reset to 0 dB.
 * NOTE(review): this chunk is gap-sampled; some original lines are elided. */
1986 void pa_sink_set_volume(
1988 const pa_cvolume *volume,
1992 pa_cvolume new_reference_volume;
1995 pa_sink_assert_ref(s);
1996 pa_assert_ctl_context();
1997 pa_assert(PA_SINK_IS_LINKED(s->state));
1998 pa_assert(!volume || pa_cvolume_valid(volume));
1999 pa_assert(volume || pa_sink_flat_volume_enabled(s));
2000 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2002 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2003 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2004 if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2005 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
2009 /* In case of volume sharing, the volume is set for the root sink first,
2010 * from which it's then propagated to the sharing sinks. */
2011 root_sink = pa_sink_get_master(s);
2013 if (PA_UNLIKELY(!root_sink))
2016 /* As a special exception we accept mono volumes on all sinks --
2017 * even on those with more complex channel maps */
2020 if (pa_cvolume_compatible(volume, &s->sample_spec))
2021 new_reference_volume = *volume;
/* Mono volume on a multi-channel sink: keep the balance, scale the max. */
2023 new_reference_volume = s->reference_volume;
2024 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2027 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2029 if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2030 if (pa_sink_flat_volume_enabled(root_sink)) {
2031 /* OK, propagate this volume change back to the inputs */
2032 propagate_reference_volume(root_sink);
2034 /* And now recalculate the real volume */
2035 compute_real_volume(root_sink);
/* Non-flat mode: real volume simply tracks the reference volume. */
2037 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2041 /* If volume is NULL we synchronize the sink's real and
2042 * reference volumes with the stream volumes. */
2044 pa_assert(pa_sink_flat_volume_enabled(root_sink));
2046 /* Ok, let's determine the new real volume */
2047 compute_real_volume(root_sink);
2049 /* Let's 'push' the reference volume if necessary */
2050 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2051 /* If the sink and it's root don't have the same number of channels, we need to remap */
2052 if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2053 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2054 update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2056 /* Now that the reference volume is updated, we can update the streams'
2057 * reference ratios. */
2058 compute_reference_ratios(root_sink);
2061 if (root_sink->set_volume) {
2062 /* If we have a function set_volume(), then we do not apply a
2063 * soft volume by default. However, set_volume() is free to
2064 * apply one to root_sink->soft_volume */
2066 pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
/* With deferred volume the hw callback runs from the IO thread instead. */
2067 if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2068 root_sink->set_volume(root_sink);
2071 /* If we have no function set_volume(), then the soft volume
2072 * becomes the real volume */
2073 root_sink->soft_volume = root_sink->real_volume;
2075 /* This tells the sink that soft volume and/or real volume changed */
2077 pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2080 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2081 * Only to be called by sink implementor */
/* Set the sink's software (digital) volume; NULL resets it to 0 dB. With
 * deferred volume the thread_info copy is updated directly, otherwise the
 * change is pushed to the IO thread via a SET_VOLUME message. */
2082 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2084 pa_sink_assert_ref(s);
2085 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* Deferred volume → must be in IO context; otherwise → main (ctl) context. */
2087 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2088 pa_sink_assert_io_context(s);
2090 pa_assert_ctl_context();
2093 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2095 s->soft_volume = *volume;
2097 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2098 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2100 s->thread_info.soft_volume = s->soft_volume;
2103 /* Called from the main thread. Only called for the root sink in volume sharing
2104 * cases, except for internal recursive calls. */
/* React to a hardware-originated real-volume change: adopt it as the new
 * reference volume, rebuild stream volumes from the (fixed) real ratios,
 * and mark the hw setting for saving. */
2105 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2109 pa_sink_assert_ref(s);
2110 pa_assert(old_real_volume);
2111 pa_assert_ctl_context();
2112 pa_assert(PA_SINK_IS_LINKED(s->state));
2114 /* This is called when the hardware's real volume changes due to
2115 * some external event. We copy the real volume into our
2116 * reference volume and then rebuild the stream volumes based on
2117 * i->real_ratio which should stay fixed. */
2119 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* No actual change: nothing to propagate. */
2120 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2123 /* 1. Make the real volume the reference volume */
2124 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2127 if (pa_sink_flat_volume_enabled(s)) {
2129 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2130 pa_cvolume old_volume = i->volume;
2132 /* 2. Since the sink's reference and real volumes are equal
2133 * now our ratios should be too. */
2134 i->reference_ratio = i->real_ratio;
2136 /* 3. Recalculate the new stream reference volume based on the
2137 * reference ratio and the sink's reference volume.
2139 * This basically calculates:
2141 * i->volume = s->reference_volume * i->reference_ratio
2143 * This is identical to propagate_reference_volume() */
2144 i->volume = s->reference_volume;
2145 pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
2146 pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);
2148 /* Notify if something changed */
2149 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
2151 if (i->volume_changed)
2152 i->volume_changed(i);
2154 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
/* Recurse into volume-sharing filter sinks below this input. */
2157 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2158 propagate_real_volume(i->origin_sink, old_real_volume);
2162 /* Something got changed in the hardware. It probably makes sense
2163 * to save changed hw settings given that hw volume changes not
2164 * triggered by PA are almost certainly done by the user. */
2165 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2166 s->save_volume = true;
2169 /* Called from io thread */
/* Ask the main thread (via the outbound message queue) to re-read volume
 * and mute state from the hardware; fire-and-forget, no reply expected. */
2170 void pa_sink_update_volume_and_mute(pa_sink *s) {
2172 pa_sink_assert_io_context(s);
2174 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2177 /* Called from main thread */
/* Return the sink's reference volume, optionally refreshing the real volume
 * from the hardware first (via get_volume() or a GET_VOLUME message) and
 * propagating any externally-changed value. */
2178 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
2179 pa_sink_assert_ref(s);
2180 pa_assert_ctl_context();
2181 pa_assert(PA_SINK_IS_LINKED(s->state));
2183 if (s->refresh_volume || force_refresh) {
2184 struct pa_cvolume old_real_volume;
2186 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2188 old_real_volume = s->real_volume;
/* Non-deferred volume: query the hw callback directly on this thread;
 * deferred volume goes through the IO thread instead. */
2190 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2193 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2195 update_real_volume(s, &s->real_volume, &s->channel_map);
2196 propagate_real_volume(s, &old_real_volume);
2199 return &s->reference_volume;
2202 /* Called from main thread. In volume sharing cases, only the root sink may
/* For sink implementors: report an externally-changed hardware volume so
 * reference volume and stream volumes get updated and events are fired. */
2204 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2205 pa_cvolume old_real_volume;
2207 pa_sink_assert_ref(s);
2208 pa_assert_ctl_context();
2209 pa_assert(PA_SINK_IS_LINKED(s->state));
2210 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2212 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2214 old_real_volume = s->real_volume;
2215 update_real_volume(s, new_real_volume, &s->channel_map);
2216 propagate_real_volume(s, &old_real_volume);
2219 /* Called from main thread */
/* Set the sink's mute state, push it to the hardware / IO thread, and fire
 * a change event if the effective state actually changed. 'save' marks the
 * setting for persistent storage. */
2220 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2223 pa_sink_assert_ref(s);
2224 pa_assert_ctl_context();
2225 pa_assert(PA_SINK_IS_LINKED(s->state));
2227 old_muted = s->muted;
/* Keep the save flag sticky when the state did not actually change. */
2229 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
/* Non-deferred mute: call the hw callback here; deferred goes via IO thread. */
2231 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute)
2234 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2236 if (old_muted != s->muted)
2237 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2240 /* Called from main thread */
/* Return the sink's mute state, optionally re-reading it from the hardware
 * first. If the hardware state changed behind our back, save it, notify
 * subscribers, and re-sync the IO thread's soft mute. */
2241 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2243 pa_sink_assert_ref(s);
2244 pa_assert_ctl_context();
2245 pa_assert(PA_SINK_IS_LINKED(s->state));
2247 if (s->refresh_muted || force_refresh) {
2248 bool old_muted = s->muted;
/* Non-deferred mute: query the hw callback directly; deferred goes via
 * a synchronous GET_MUTE message to the IO thread. */
2250 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_mute)
2253 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
2255 if (old_muted != s->muted) {
/* External change — almost certainly user-initiated, so persist it. */
2256 s->save_muted = true;
2258 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2260 /* Make sure the soft mute status stays in sync */
2261 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2268 /* Called from main thread */
/* For sink implementors: report an externally-changed hardware mute state;
 * stores it, marks it for saving, and notifies subscribers. */
2269 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2270 pa_sink_assert_ref(s);
2271 pa_assert_ctl_context();
2272 pa_assert(PA_SINK_IS_LINKED(s->state));
2274 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
/* No change: avoid a spurious event. */
2276 if (s->muted == new_muted)
2279 s->muted = new_muted;
2280 s->save_muted = true;
2282 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2285 /* Called from main thread */
/* Merge property list 'p' into the sink's proplist using the given update
 * mode, then fire the proplist-changed hook and subscription event if the
 * sink is linked. */
2286 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2287 pa_sink_assert_ref(s);
2288 pa_assert_ctl_context();
2291 pa_proplist_update(s->proplist, mode, p);
2293 if (PA_SINK_IS_LINKED(s->state)) {
2294 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2295 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2301 /* Called from main thread */
2302 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Set (or clear, when NULL) the sink's human-readable description property,
 * keep the monitor source's description in sync, and notify listeners. */
2303 void pa_sink_set_description(pa_sink *s, const char *description) {
2305 pa_sink_assert_ref(s);
2306 pa_assert_ctl_context();
/* Nothing to clear and nothing set: no-op. */
2308 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2311 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
/* Unchanged description: no-op. */
2313 if (old && description && pa_streq(old, description))
2317 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2319 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
/* Mirror the new description onto the monitor source. */
2321 if (s->monitor_source) {
2324 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2325 pa_source_set_description(s->monitor_source, n);
2329 if (PA_SINK_IS_LINKED(s->state)) {
2330 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2331 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2335 /* Called from main thread */
2336 unsigned pa_sink_linked_by(pa_sink *s) {
2339 pa_sink_assert_ref(s);
2340 pa_assert_ctl_context();
2341 pa_assert(PA_SINK_IS_LINKED(s->state));
2343 ret = pa_idxset_size(s->inputs);
2345 /* We add in the number of streams connected to us here. Please
2346 * note the asymmetry to pa_sink_used_by()! */
2348 if (s->monitor_source)
2349 ret += pa_source_linked_by(s->monitor_source);
2354 /* Called from main thread */
2355 unsigned pa_sink_used_by(pa_sink *s) {
2358 pa_sink_assert_ref(s);
2359 pa_assert_ctl_context();
2360 pa_assert(PA_SINK_IS_LINKED(s->state));
2362 ret = pa_idxset_size(s->inputs);
2363 pa_assert(ret >= s->n_corked);
2365 /* Streams connected to our monitor source do not matter for
2366 * pa_sink_used_by()!.*/
2368 return ret - s->n_corked;
2371 /* Called from main thread */
2372 unsigned pa_sink_check_suspend(pa_sink *s) {
2377 pa_sink_assert_ref(s);
2378 pa_assert_ctl_context();
2380 if (!PA_SINK_IS_LINKED(s->state))
2385 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2386 pa_sink_input_state_t st;
2388 st = pa_sink_input_get_state(i);
2390 /* We do not assert here. It is perfectly valid for a sink input to
2391 * be in the INIT state (i.e. created, marked done but not yet put)
2392 * and we should not care if it's unlinked as it won't contribute
2393 * towards our busy status.
2395 if (!PA_SINK_INPUT_IS_LINKED(st))
2398 if (st == PA_SINK_INPUT_CORKED)
2401 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2407 if (s->monitor_source)
2408 ret += pa_source_check_suspend(s->monitor_source);
2413 /* Called from the IO thread */
2414 static void sync_input_volumes_within_thread(pa_sink *s) {
2418 pa_sink_assert_ref(s);
2419 pa_sink_assert_io_context(s);
2421 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2422 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2425 i->thread_info.soft_volume = i->soft_volume;
2426 pa_sink_input_request_rewind(i, 0, true, false, false);
2430 /* Called from the IO thread. Only called for the root sink in volume sharing
2431 * cases, except for internal recursive calls. */
2432 static void set_shared_volume_within_thread(pa_sink *s) {
2433 pa_sink_input *i = NULL;
2436 pa_sink_assert_ref(s);
2438 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2440 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2441 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2442 set_shared_volume_within_thread(i->origin_sink);
2446 /* Called from IO thread, except when it is not */
2447 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2448 pa_sink *s = PA_SINK(o);
2449 pa_sink_assert_ref(s);
2451 switch ((pa_sink_message_t) code) {
2453 case PA_SINK_MESSAGE_ADD_INPUT: {
2454 pa_sink_input *i = PA_SINK_INPUT(userdata);
2456 /* If you change anything here, make sure to change the
2457 * sink input handling a few lines down at
2458 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2460 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2462 /* Since the caller sleeps in pa_sink_input_put(), we can
2463 * safely access data outside of thread_info even though
2466 if ((i->thread_info.sync_prev = i->sync_prev)) {
2467 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2468 pa_assert(i->sync_prev->sync_next == i);
2469 i->thread_info.sync_prev->thread_info.sync_next = i;
2472 if ((i->thread_info.sync_next = i->sync_next)) {
2473 pa_assert(i->sink == i->thread_info.sync_next->sink);
2474 pa_assert(i->sync_next->sync_prev == i);
2475 i->thread_info.sync_next->thread_info.sync_prev = i;
2478 pa_assert(!i->thread_info.attached);
2479 i->thread_info.attached = true;
2484 pa_sink_input_set_state_within_thread(i, i->state);
2486 /* The requested latency of the sink input needs to be fixed up and
2487 * then configured on the sink. If this causes the sink latency to
2488 * go down, the sink implementor is responsible for doing a rewind
2489 * in the update_requested_latency() callback to ensure that the
2490 * sink buffer doesn't contain more data than what the new latency
2493 * XXX: Does it really make sense to push this responsibility to
2494 * the sink implementors? Wouldn't it be better to do it once in
2495 * the core than many times in the modules? */
2497 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2498 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2500 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2501 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2503 /* We don't rewind here automatically. This is left to the
2504 * sink input implementor because some sink inputs need a
2505 * slow start, i.e. need some time to buffer client
2506 * samples before beginning streaming.
2508 * XXX: Does it really make sense to push this functionality to
2509 * the sink implementors? Wouldn't it be better to do it once in
2510 * the core than many times in the modules? */
2512 /* In flat volume mode we need to update the volume as
2514 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2517 case PA_SINK_MESSAGE_REMOVE_INPUT: {
2518 pa_sink_input *i = PA_SINK_INPUT(userdata);
2520 /* If you change anything here, make sure to change the
2521 * sink input handling a few lines down at
2522 * PA_SINK_MESSAGE_START_MOVE, too. */
2527 pa_sink_input_set_state_within_thread(i, i->state);
2529 pa_assert(i->thread_info.attached);
2530 i->thread_info.attached = false;
2532 /* Since the caller sleeps in pa_sink_input_unlink(),
2533 * we can safely access data outside of thread_info even
2534 * though it is mutable */
2536 pa_assert(!i->sync_prev);
2537 pa_assert(!i->sync_next);
2539 if (i->thread_info.sync_prev) {
2540 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2541 i->thread_info.sync_prev = NULL;
2544 if (i->thread_info.sync_next) {
2545 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2546 i->thread_info.sync_next = NULL;
2549 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
2550 pa_sink_input_unref(i);
2552 pa_sink_invalidate_requested_latency(s, true);
2553 pa_sink_request_rewind(s, (size_t) -1);
2555 /* In flat volume mode we need to update the volume as
2557 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2560 case PA_SINK_MESSAGE_START_MOVE: {
2561 pa_sink_input *i = PA_SINK_INPUT(userdata);
2563 /* We don't support moving synchronized streams. */
2564 pa_assert(!i->sync_prev);
2565 pa_assert(!i->sync_next);
2566 pa_assert(!i->thread_info.sync_next);
2567 pa_assert(!i->thread_info.sync_prev);
2569 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2571 size_t sink_nbytes, total_nbytes;
2573 /* The old sink probably has some audio from this
2574 * stream in its buffer. We want to "take it back" as
2575 * much as possible and play it to the new sink. We
2576 * don't know at this point how much the old sink can
2577 * rewind. We have to pick something, and that
2578 * something is the full latency of the old sink here.
2579 * So we rewind the stream buffer by the sink latency
2580 * amount, which may be more than what we should
2581 * rewind. This can result in a chunk of audio being
2582 * played both to the old sink and the new sink.
2584 * FIXME: Fix this code so that we don't have to make
2585 * guesses about how much the sink will actually be
2586 * able to rewind. If someone comes up with a solution
2587 * for this, something to note is that the part of the
2588 * latency that the old sink couldn't rewind should
2589 * ideally be compensated after the stream has moved
2590 * to the new sink by adding silence. The new sink
2591 * most likely can't start playing the moved stream
2592 * immediately, and that gap should be removed from
2593 * the "compensation silence" (at least at the time of
2594 * writing this, the move finish code will actually
2595 * already take care of dropping the new sink's
2596 * unrewindable latency, so taking into account the
2597 * unrewindable latency of the old sink is the only
2600 * The render_memblockq contents are discarded,
2601 * because when the sink changes, the format of the
2602 * audio stored in the render_memblockq may change
2603 * too, making the stored audio invalid. FIXME:
2604 * However, the read and write indices are moved back
2605 * the same amount, so if they are not the same now,
2606 * they won't be the same after the rewind either. If
2607 * the write index of the render_memblockq is ahead of
2608 * the read index, then the render_memblockq will feed
2609 * the new sink some silence first, which it shouldn't
2610 * do. The write index should be flushed to be the
2611 * same as the read index. */
2613 /* Get the latency of the sink */
2614 usec = pa_sink_get_latency_within_thread(s);
2615 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2616 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
2618 if (total_nbytes > 0) {
2619 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
2620 i->thread_info.rewrite_flush = true;
2621 pa_sink_input_process_rewind(i, sink_nbytes);
2628 pa_assert(i->thread_info.attached);
2629 i->thread_info.attached = false;
2631 /* Let's remove the sink input ...*/
2632 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
2633 pa_sink_input_unref(i);
2635 pa_sink_invalidate_requested_latency(s, true);
2637 pa_log_debug("Requesting rewind due to started move");
2638 pa_sink_request_rewind(s, (size_t) -1);
2640 /* In flat volume mode we need to update the volume as
2642 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2645 case PA_SINK_MESSAGE_FINISH_MOVE: {
2646 pa_sink_input *i = PA_SINK_INPUT(userdata);
2648 /* We don't support moving synchronized streams. */
2649 pa_assert(!i->sync_prev);
2650 pa_assert(!i->sync_next);
2651 pa_assert(!i->thread_info.sync_next);
2652 pa_assert(!i->thread_info.sync_prev);
2654 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2656 pa_assert(!i->thread_info.attached);
2657 i->thread_info.attached = true;
2662 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2666 /* In the ideal case the new sink would start playing
2667 * the stream immediately. That requires the sink to
2668 * be able to rewind all of its latency, which usually
2669 * isn't possible, so there will probably be some gap
2670 * before the moved stream becomes audible. We then
2671 * have two possibilities: 1) start playing the stream
2672 * from where it is now, or 2) drop the unrewindable
2673 * latency of the sink from the stream. With option 1
2674 * we won't lose any audio but the stream will have a
2675 * pause. With option 2 we may lose some audio but the
2676 * stream time will be somewhat in sync with the wall
2677 * clock. Lennart seems to have chosen option 2 (one
2678 * of the reasons might have been that option 1 is
2679 * actually much harder to implement), so we drop the
2680 * latency of the new sink from the moved stream and
2681 * hope that the sink will undo most of that in the
2684 /* Get the latency of the sink */
2685 usec = pa_sink_get_latency_within_thread(s);
2686 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2689 pa_sink_input_drop(i, nbytes);
2691 pa_log_debug("Requesting rewind due to finished move");
2692 pa_sink_request_rewind(s, nbytes);
2695 /* Updating the requested sink latency has to be done
2696 * after the sink rewind request, not before, because
2697 * otherwise the sink may limit the rewind amount
2700 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2701 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2703 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2704 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2706 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2709 case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
2710 pa_sink *root_sink = pa_sink_get_master(s);
2712 if (PA_LIKELY(root_sink))
2713 set_shared_volume_within_thread(root_sink);
2718 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2720 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2722 pa_sink_volume_change_push(s);
2724 /* Fall through ... */
2726 case PA_SINK_MESSAGE_SET_VOLUME:
2728 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2729 s->thread_info.soft_volume = s->soft_volume;
2730 pa_sink_request_rewind(s, (size_t) -1);
2733 /* Fall through ... */
2735 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2736 sync_input_volumes_within_thread(s);
2739 case PA_SINK_MESSAGE_GET_VOLUME:
2741 if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
2743 pa_sink_volume_change_flush(s);
2744 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2747 /* In case sink implementor reset SW volume. */
2748 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2749 s->thread_info.soft_volume = s->soft_volume;
2750 pa_sink_request_rewind(s, (size_t) -1);
2755 case PA_SINK_MESSAGE_SET_MUTE:
2757 if (s->thread_info.soft_muted != s->muted) {
2758 s->thread_info.soft_muted = s->muted;
2759 pa_sink_request_rewind(s, (size_t) -1);
2762 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2767 case PA_SINK_MESSAGE_GET_MUTE:
2769 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2774 case PA_SINK_MESSAGE_SET_STATE: {
2776 bool suspend_change =
2777 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2778 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2780 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2782 if (s->thread_info.state == PA_SINK_SUSPENDED) {
2783 s->thread_info.rewind_nbytes = 0;
2784 s->thread_info.rewind_requested = false;
2787 if (suspend_change) {
2791 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2792 if (i->suspend_within_thread)
2793 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2799 case PA_SINK_MESSAGE_DETACH:
2801 /* Detach all streams */
2802 pa_sink_detach_within_thread(s);
2805 case PA_SINK_MESSAGE_ATTACH:
2807 /* Reattach all streams */
2808 pa_sink_attach_within_thread(s);
2811 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2813 pa_usec_t *usec = userdata;
2814 *usec = pa_sink_get_requested_latency_within_thread(s);
2816 /* Yes, that's right, the IO thread will see -1 when no
2817 * explicit requested latency is configured, the main
2818 * thread will see max_latency */
2819 if (*usec == (pa_usec_t) -1)
2820 *usec = s->thread_info.max_latency;
2825 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2826 pa_usec_t *r = userdata;
2828 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2833 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2834 pa_usec_t *r = userdata;
2836 r[0] = s->thread_info.min_latency;
2837 r[1] = s->thread_info.max_latency;
2842 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2844 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2847 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2849 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2852 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2854 *((size_t*) userdata) = s->thread_info.max_rewind;
2857 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2859 *((size_t*) userdata) = s->thread_info.max_request;
2862 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2864 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2867 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2869 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2872 case PA_SINK_MESSAGE_SET_PORT:
2874 pa_assert(userdata);
2876 struct sink_message_set_port *msg_data = userdata;
2877 msg_data->ret = s->set_port(s, msg_data->port);
2881 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2882 /* This message is sent from IO-thread and handled in main thread. */
2883 pa_assert_ctl_context();
2885 /* Make sure we're not messing with main thread when no longer linked */
2886 if (!PA_SINK_IS_LINKED(s->state))
2889 pa_sink_get_volume(s, true);
2890 pa_sink_get_mute(s, true);
2893 case PA_SINK_MESSAGE_SET_LATENCY_OFFSET:
2894 s->thread_info.latency_offset = offset;
2897 case PA_SINK_MESSAGE_GET_LATENCY:
2898 case PA_SINK_MESSAGE_MAX:
2905 /* Called from main thread */
2906 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
2911 pa_core_assert_ref(c);
2912 pa_assert_ctl_context();
2913 pa_assert(cause != 0);
2915 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2918 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2925 /* Called from main thread */
2926 void pa_sink_detach(pa_sink *s) {
2927 pa_sink_assert_ref(s);
2928 pa_assert_ctl_context();
2929 pa_assert(PA_SINK_IS_LINKED(s->state));
2931 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2934 /* Called from main thread */
2935 void pa_sink_attach(pa_sink *s) {
2936 pa_sink_assert_ref(s);
2937 pa_assert_ctl_context();
2938 pa_assert(PA_SINK_IS_LINKED(s->state));
2940 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2943 /* Called from IO thread */
2944 void pa_sink_detach_within_thread(pa_sink *s) {
2948 pa_sink_assert_ref(s);
2949 pa_sink_assert_io_context(s);
2950 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2952 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2956 if (s->monitor_source)
2957 pa_source_detach_within_thread(s->monitor_source);
2960 /* Called from IO thread */
2961 void pa_sink_attach_within_thread(pa_sink *s) {
2965 pa_sink_assert_ref(s);
2966 pa_sink_assert_io_context(s);
2967 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2969 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2973 if (s->monitor_source)
2974 pa_source_attach_within_thread(s->monitor_source);
2977 /* Called from IO thread */
2978 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2979 pa_sink_assert_ref(s);
2980 pa_sink_assert_io_context(s);
2981 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2983 if (nbytes == (size_t) -1)
2984 nbytes = s->thread_info.max_rewind;
2986 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2988 if (s->thread_info.rewind_requested &&
2989 nbytes <= s->thread_info.rewind_nbytes)
2992 s->thread_info.rewind_nbytes = nbytes;
2993 s->thread_info.rewind_requested = true;
2995 if (s->request_rewind)
2996 s->request_rewind(s);
2999 /* Called from IO thread */
3000 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
3001 pa_usec_t result = (pa_usec_t) -1;
3004 pa_usec_t monitor_latency;
3006 pa_sink_assert_ref(s);
3007 pa_sink_assert_io_context(s);
3009 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3010 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
3012 if (s->thread_info.requested_latency_valid)
3013 return s->thread_info.requested_latency;
3015 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3016 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3017 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3018 result = i->thread_info.requested_sink_latency;
3020 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3022 if (monitor_latency != (pa_usec_t) -1 &&
3023 (result == (pa_usec_t) -1 || result > monitor_latency))
3024 result = monitor_latency;
3026 if (result != (pa_usec_t) -1)
3027 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3029 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3030 /* Only cache if properly initialized */
3031 s->thread_info.requested_latency = result;
3032 s->thread_info.requested_latency_valid = true;
3038 /* Called from main thread */
3039 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3042 pa_sink_assert_ref(s);
3043 pa_assert_ctl_context();
3044 pa_assert(PA_SINK_IS_LINKED(s->state));
3046 if (s->state == PA_SINK_SUSPENDED)
3049 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3054 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3055 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3059 pa_sink_assert_ref(s);
3060 pa_sink_assert_io_context(s);
3062 if (max_rewind == s->thread_info.max_rewind)
3065 s->thread_info.max_rewind = max_rewind;
3067 if (PA_SINK_IS_LINKED(s->thread_info.state))
3068 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3069 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3071 if (s->monitor_source)
3072 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3075 /* Called from main thread */
3076 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3077 pa_sink_assert_ref(s);
3078 pa_assert_ctl_context();
3080 if (PA_SINK_IS_LINKED(s->state))
3081 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3083 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3086 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3087 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3090 pa_sink_assert_ref(s);
3091 pa_sink_assert_io_context(s);
3093 if (max_request == s->thread_info.max_request)
3096 s->thread_info.max_request = max_request;
3098 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3101 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3102 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3106 /* Called from main thread */
3107 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3108 pa_sink_assert_ref(s);
3109 pa_assert_ctl_context();
3111 if (PA_SINK_IS_LINKED(s->state))
3112 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3114 pa_sink_set_max_request_within_thread(s, max_request);
3117 /* Called from IO thread */
3118 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
3122 pa_sink_assert_ref(s);
3123 pa_sink_assert_io_context(s);
3125 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3126 s->thread_info.requested_latency_valid = false;
3130 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3132 if (s->update_requested_latency)
3133 s->update_requested_latency(s);
3135 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3136 if (i->update_sink_requested_latency)
3137 i->update_sink_requested_latency(i);
3141 /* Called from main thread */
3142 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3143 pa_sink_assert_ref(s);
3144 pa_assert_ctl_context();
3146 /* min_latency == 0: no limit
3147 * min_latency anything else: specified limit
3149 * Similar for max_latency */
3151 if (min_latency < ABSOLUTE_MIN_LATENCY)
3152 min_latency = ABSOLUTE_MIN_LATENCY;
3154 if (max_latency <= 0 ||
3155 max_latency > ABSOLUTE_MAX_LATENCY)
3156 max_latency = ABSOLUTE_MAX_LATENCY;
3158 pa_assert(min_latency <= max_latency);
3160 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3161 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3162 max_latency == ABSOLUTE_MAX_LATENCY) ||
3163 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3165 if (PA_SINK_IS_LINKED(s->state)) {
3171 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3173 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3176 /* Called from main thread */
3177 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3178 pa_sink_assert_ref(s);
3179 pa_assert_ctl_context();
3180 pa_assert(min_latency);
3181 pa_assert(max_latency);
3183 if (PA_SINK_IS_LINKED(s->state)) {
3184 pa_usec_t r[2] = { 0, 0 };
3186 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3188 *min_latency = r[0];
3189 *max_latency = r[1];
3191 *min_latency = s->thread_info.min_latency;
3192 *max_latency = s->thread_info.max_latency;
3196 /* Called from IO thread */
3197 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3198 pa_sink_assert_ref(s);
3199 pa_sink_assert_io_context(s);
3201 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3202 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3203 pa_assert(min_latency <= max_latency);
3205 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3206 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3207 max_latency == ABSOLUTE_MAX_LATENCY) ||
3208 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3210 if (s->thread_info.min_latency == min_latency &&
3211 s->thread_info.max_latency == max_latency)
3214 s->thread_info.min_latency = min_latency;
3215 s->thread_info.max_latency = max_latency;
3217 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3221 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3222 if (i->update_sink_latency_range)
3223 i->update_sink_latency_range(i);
3226 pa_sink_invalidate_requested_latency(s, false);
3228 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3231 /* Called from main thread */
3232 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3233 pa_sink_assert_ref(s);
3234 pa_assert_ctl_context();
3236 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3237 pa_assert(latency == 0);
3241 if (latency < ABSOLUTE_MIN_LATENCY)
3242 latency = ABSOLUTE_MIN_LATENCY;
3244 if (latency > ABSOLUTE_MAX_LATENCY)
3245 latency = ABSOLUTE_MAX_LATENCY;
3247 if (PA_SINK_IS_LINKED(s->state))
3248 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3250 s->thread_info.fixed_latency = latency;
3252 pa_source_set_fixed_latency(s->monitor_source, latency);
3255 /* Called from main thread */
3256 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3259 pa_sink_assert_ref(s);
3260 pa_assert_ctl_context();
3262 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3265 if (PA_SINK_IS_LINKED(s->state))
3266 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3268 latency = s->thread_info.fixed_latency;
3273 /* Called from IO thread */
3274 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3275 pa_sink_assert_ref(s);
3276 pa_sink_assert_io_context(s);
3278 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3279 pa_assert(latency == 0);
3280 s->thread_info.fixed_latency = 0;
3282 if (s->monitor_source)
3283 pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
3288 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3289 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3291 if (s->thread_info.fixed_latency == latency)
3294 s->thread_info.fixed_latency = latency;
3296 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3300 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3301 if (i->update_sink_fixed_latency)
3302 i->update_sink_fixed_latency(i);
3305 pa_sink_invalidate_requested_latency(s, false);
3307 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3310 /* Called from main context */
3311 void pa_sink_set_latency_offset(pa_sink *s, int64_t offset) {
3312 pa_sink_assert_ref(s);
3314 s->latency_offset = offset;
3316 if (PA_SINK_IS_LINKED(s->state))
3317 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3319 s->thread_info.latency_offset = offset;
3322 /* Called from main context */
3323 size_t pa_sink_get_max_rewind(pa_sink *s) {
3325 pa_assert_ctl_context();
3326 pa_sink_assert_ref(s);
3328 if (!PA_SINK_IS_LINKED(s->state))
3329 return s->thread_info.max_rewind;
3331 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3336 /* Called from main context */
3337 size_t pa_sink_get_max_request(pa_sink *s) {
3339 pa_sink_assert_ref(s);
3340 pa_assert_ctl_context();
3342 if (!PA_SINK_IS_LINKED(s->state))
3343 return s->thread_info.max_request;
3345 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3350 /* Called from main context */
3351 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3352 pa_device_port *port;
3355 pa_sink_assert_ref(s);
3356 pa_assert_ctl_context();
3359 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3360 return -PA_ERR_NOTIMPLEMENTED;
3364 return -PA_ERR_NOENTITY;
3366 if (!(port = pa_hashmap_get(s->ports, name)))
3367 return -PA_ERR_NOENTITY;
3369 if (s->active_port == port) {
3370 s->save_port = s->save_port || save;
3374 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
3375 struct sink_message_set_port msg = { .port = port, .ret = 0 };
3376 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
3380 ret = s->set_port(s, port);
3383 return -PA_ERR_NOENTITY;
3385 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3387 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3389 s->active_port = port;
3390 s->save_port = save;
3392 pa_sink_set_latency_offset(s, s->active_port->latency_offset);
3394 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
/* Derive an XDG icon name for a device from its form factor, device class,
 * active profile and bus properties, and store it in
 * PA_PROP_DEVICE_ICON_NAME. An already-present icon name is left untouched. */
3399 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3400 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
/* Respect an icon name that was already set elsewhere. */
3404 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
/* Pick a base icon from the form factor, when one is reported. */
3407 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3409 if (pa_streq(ff, "microphone"))
3410 t = "audio-input-microphone";
3411 else if (pa_streq(ff, "webcam"))
3413 else if (pa_streq(ff, "computer"))
3415 else if (pa_streq(ff, "handset"))
3417 else if (pa_streq(ff, "portable"))
3418 t = "multimedia-player";
3419 else if (pa_streq(ff, "tv"))
3420 t = "video-display";
3423 * The following icons are not part of the icon naming spec;
3424 * see the xdg mailing list discussion below for the background:
3426 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3428 else if (pa_streq(ff, "headset"))
3429 t = "audio-headset";
3430 else if (pa_streq(ff, "headphone"))
3431 t = "audio-headphones";
3432 else if (pa_streq(ff, "speaker"))
3433 t = "audio-speakers";
3434 else if (pa_streq(ff, "hands-free"))
3435 t = "audio-handsfree";
/* Fall back on the device class when the form factor gave nothing. */
3439 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3440 if (pa_streq(c, "modem"))
3447 t = "audio-input-microphone";
/* Append a suffix describing the active profile, when recognizable. */
3450 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3451 if (strstr(profile, "analog"))
3453 else if (strstr(profile, "iec958"))
3455 else if (strstr(profile, "hdmi"))
/* Compose "<base><profile-suffix>-<bus>" into the icon property. */
3459 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3461 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
/* Fill in a human-readable PA_PROP_DEVICE_DESCRIPTION from the form factor,
 * device class, product name and profile description. Returns without
 * touching anything if a description is already set. */
3466 bool pa_device_init_description(pa_proplist *p) {
3467 const char *s, *d = NULL, *k;
3470 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
/* Internal devices get a translated generic name. */
3473 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3474 if (pa_streq(s, "internal"))
3475 d = _("Built-in Audio");
3478 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3479 if (pa_streq(s, "modem"))
/* Otherwise fall back to the product name the device reported. */
3483 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
/* Append the profile description (e.g. "Analog Stereo") when present. */
3488 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3491 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3493 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
/* Tag handset/hands-free/headset devices with the "phone" intended role,
 * unless PA_PROP_DEVICE_INTENDED_ROLES is already set. */
3498 bool pa_device_init_intended_roles(pa_proplist *p) {
3502 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3505 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3506 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3507 || pa_streq(s, "headset")) {
3508 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
/* Compute a heuristic priority for a device from its class, form factor,
 * bus and profile-name properties; accumulated into `priority`. The exact
 * weights are on lines elided from this chunk. */
3515 unsigned pa_device_init_priority(pa_proplist *p) {
3517 unsigned priority = 0;
/* Device class: "sound" ranks differently from non-modem classes. */
3521 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3523 if (pa_streq(s, "sound"))
3525 else if (!pa_streq(s, "modem"))
/* Form factor: internal vs. speaker vs. headphone. */
3529 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3531 if (pa_streq(s, "internal"))
3533 else if (pa_streq(s, "speaker"))
3535 else if (pa_streq(s, "headphone"))
/* Bus type: pci vs. usb vs. bluetooth. */
3539 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3541 if (pa_streq(s, "pci"))
3543 else if (pa_streq(s, "usb"))
3545 else if (pa_streq(s, "bluetooth"))
/* Profile name prefix: analog vs. iec958. */
3549 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3551 if (pa_startswith(s, "analog-"))
3553 else if (pa_startswith(s, "iec958-"))
/* Lock-free recycling pool for pa_sink_volume_change records, so the IO
 * thread can usually avoid hitting the allocator. */
3560 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3562 /* Called from the IO thread. */
/* Get a fresh volume-change record: pop one from the static free list, or
 * allocate if the list is empty; then reset its list links and initialize
 * hw_volume for the sink's channel count. */
3563 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3564 pa_sink_volume_change *c;
3565 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3566 c = pa_xnew(pa_sink_volume_change, 1);
3568 PA_LLIST_INIT(pa_sink_volume_change, c);
3570 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3574 /* Called from the IO thread. */
/* Recycle a record onto the static free list; when the push fails the
 * record is disposed of instead (that branch body is elided here). */
3575 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3577 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3581 /* Called from the IO thread. */
/* Queue a hardware volume change, scheduled at "now + sink latency +
 * extra delay" so it takes effect when audio buffered right now is
 * actually heard. Up-changes are biased late and down-changes early by a
 * safety margin, and queued changes made obsolete by the new one are
 * dropped. */
3582 void pa_sink_volume_change_push(pa_sink *s) {
3583 pa_sink_volume_change *c = NULL;
3584 pa_sink_volume_change *nc = NULL;
3585 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3587 const char *direction = NULL;
3590 nc = pa_sink_volume_change_new(s);
3592 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3593 * Adding one more volume for HW would get us rid of this, but I am trying
3594 * to survive with the ones we already have. */
/* hw_volume = real_volume with the software attenuation divided out. */
3595 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
/* Nothing pending and the HW volume is already right: drop the record. */
3597 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3598 pa_log_debug("Volume not changing");
3599 pa_sink_volume_change_free(nc);
/* Deadline: current latency horizon plus the configured extra delay. */
3603 nc->at = pa_sink_get_latency_within_thread(s);
3604 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
/* Walk the queue from the tail to find the insertion point. */
3606 if (s->thread_info.volume_changes_tail) {
3607 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3608 /* If volume is going up let's do it a bit late. If it is going
3609 * down let's do it a bit early. */
3610 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3611 if (nc->at + safety_margin > c->at) {
3612 nc->at += safety_margin;
3617 else if (nc->at - safety_margin > c->at) {
3618 nc->at -= safety_margin;
/* Empty queue: bias against the current hardware volume instead. */
3626 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3627 nc->at += safety_margin;
3630 nc->at -= safety_margin;
3633 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3636 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3639 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3641 /* We can ignore volume events that came earlier but should happen later than this. */
3642 PA_LLIST_FOREACH(c, nc->next) {
3643 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3644 pa_sink_volume_change_free(c);
/* The new change is now the last one in the queue. */
3647 s->thread_info.volume_changes_tail = nc;
3650 /* Called from the IO thread. */
/* Discard all queued volume changes without applying any of them. */
3651 static void pa_sink_volume_change_flush(pa_sink *s) {
3652 pa_sink_volume_change *c = s->thread_info.volume_changes;
/* Detach the list first, then recycle every entry. */
3654 s->thread_info.volume_changes = NULL;
3655 s->thread_info.volume_changes_tail = NULL;
3657 pa_sink_volume_change *next = c->next;
3658 pa_sink_volume_change_free(c);
3663 /* Called from the IO thread. */
/* Apply every queued volume change whose deadline has passed: pop it,
 * record it as the current hardware volume and (via s->write_volume,
 * asserted below — the call site is elided here) write it to the device.
 * If changes remain queued, report the time until the next one through
 * *usec_to_next when non-NULL. The return statements are elided from this
 * chunk; callers treat the bool result as "a change was applied" —
 * TODO(review): confirm against the full source. */
3664 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
/* Nothing to do if the queue is empty or the sink isn't linked yet. */
3670 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3676 pa_assert(s->write_volume);
3678 now = pa_rtclock_now();
3680 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3681 pa_sink_volume_change *c = s->thread_info.volume_changes;
3682 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3683 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3684 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3686 s->thread_info.current_hw_volume = c->hw_volume;
3687 pa_sink_volume_change_free(c);
3693 if (s->thread_info.volume_changes) {
3695 *usec_to_next = s->thread_info.volume_changes->at - now;
3696 if (pa_log_ratelimit(PA_LOG_DEBUG))
3697 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
/* Queue drained: clear the now-stale tail pointer. */
3702 s->thread_info.volume_changes_tail = NULL;
3707 /* Called from the IO thread. */
/* After a rewind of `nbytes`, the playback horizon got shorter, so queued
 * volume changes scheduled past the new latency limit are pulled forward;
 * anything that becomes due is applied immediately at the end. */
3708 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3709 /* All the queued volume events later than current latency are shifted to happen earlier. */
3710 pa_sink_volume_change *c;
3711 pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3712 pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3713 pa_usec_t limit = pa_sink_get_latency_within_thread(s);
3715 pa_log_debug("latency = %lld", (long long) limit);
3716 limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3718 PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3719 pa_usec_t modified_limit = limit;
/* Keep the same up-late/down-early bias as pa_sink_volume_change_push(). */
3720 if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3721 modified_limit -= s->thread_info.volume_change_safety_margin;
3723 modified_limit += s->thread_info.volume_change_safety_margin;
3724 if (c->at > modified_limit) {
/* Clamp the (shifted) deadline so it never precedes the limit. */
3726 if (c->at < modified_limit)
3727 c->at = modified_limit;
3729 prev_vol = pa_cvolume_avg(&c->hw_volume);
/* Apply right away anything the shift made due. */
3731 pa_sink_volume_change_apply(s, NULL);
3734 /* Called from the main thread */
3735 /* Gets the list of formats supported by the sink. The members and idxset must
3736 * be freed by the caller. */
3737 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3742 if (s->get_formats) {
3743 /* Sink supports format query, all is good */
3744 ret = s->get_formats(s);
3746 /* Sink doesn't support format query, so assume it does PCM */
3747 pa_format_info *f = pa_format_info_new();
3748 f->encoding = PA_ENCODING_PCM;
/* Build a one-element set holding just the PCM format. */
3750 ret = pa_idxset_new(NULL, NULL);
3751 pa_idxset_put(ret, f, NULL);
3757 /* Called from the main thread */
3758 /* Allows an external source to set what formats a sink supports if the sink
3759 * permits this. The function makes a copy of the formats on success. */
3760 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
/* Delegate entirely to the sink implementation when it provides the hook. */
3765 /* Sink supports setting formats -- let's give it a shot */
3766 return s->set_formats(s, formats);
3768 /* Sink doesn't support setting this -- bail out */
3772 /* Called from the main thread */
3773 /* Checks if the sink can accept this format */
/* Compares `f` against each format the sink reports via
 * pa_sink_get_formats(); the query result is freed before returning. */
3774 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3775 pa_idxset *formats = NULL;
3781 formats = pa_sink_get_formats(s);
3784 pa_format_info *finfo_device;
3787 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3788 if (pa_format_info_is_compatible(finfo_device, f)) {
/* Free the queried set including its members — we own them. */
3794 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3800 /* Called from the main thread */
3801 /* Calculates the intersection between formats supported by the sink and
3802 * in_formats, and returns these, in the order of the sink's formats. */
/* NOTE(review): the end of this function (its return) lies past the
 * visible chunk. */
3803 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3804 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3805 pa_format_info *f_sink, *f_in;
/* An empty/NULL input trivially yields an empty intersection. */
3810 if (!in_formats || pa_idxset_isempty(in_formats))
3813 sink_formats = pa_sink_get_formats(s);
/* Outer loop over sink formats preserves the sink's preference order. */
3815 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3816 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3817 if (pa_format_info_is_compatible(f_sink, f_in))
3818 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
/* Free our query result; out_formats is handed to the caller. */
3824 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);