2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
31 #include <pulse/introspect.h>
32 #include <pulse/format.h>
33 #include <pulse/utf8.h>
34 #include <pulse/xmalloc.h>
35 #include <pulse/timeval.h>
36 #include <pulse/util.h>
37 #include <pulse/rtclock.h>
38 #include <pulse/internal.h>
40 #include <pulsecore/i18n.h>
41 #include <pulsecore/sink-input.h>
42 #include <pulsecore/namereg.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/mix.h>
46 #include <pulsecore/core-subscribe.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/play-memblockq.h>
50 #include <pulsecore/flist.h>
/* Tunables: maximum number of sink inputs mixed per render pass, size of
 * the mix buffer, and latency bounds/defaults. The latency constants are
 * in microseconds (note the PA_USEC_* factors on the MAX/DEFAULT values;
 * ABSOLUTE_MIN_LATENCY is presumably also usec — TODO confirm). */
54 #define MAX_MIX_CHANNELS 32
55 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
56 #define ABSOLUTE_MIN_LATENCY (500)
57 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
58 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* Declares the public pa_sink class on top of pa_msgobject (ref-counting
 * and message dispatch come from the msgobject base). */
60 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
/* One queued deferred hardware volume change; entries are kept in the
 * s->thread_info.volume_changes linked list (see PA_LLIST_HEAD_INIT in
 * pa_sink_new). NOTE(review): this extract is missing lines — the
 * struct's payload fields (volume/time) are not visible here. */
62 struct pa_sink_volume_change {
66 PA_LLIST_FIELDS(pa_sink_volume_change);
/* Payload for PA_SINK_MESSAGE_SET_PORT; the use site in pa_sink_suspend()
 * shows fields .port and .ret. Remaining field declarations are missing
 * from this extract. */
69 struct sink_message_set_port {
/* Forward declarations for static functions defined later in this file. */
74 static void sink_free(pa_object *s);
76 static void pa_sink_volume_change_push(pa_sink *s);
77 static void pa_sink_volume_change_flush(pa_sink *s);
78 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/* Initialize a pa_sink_new_data structure: allocates the property list
 * and the string-keyed port hashmap. NOTE(review): lines are missing
 * from this extract (presumably the zeroing of *data and the return of
 * data — TODO confirm against the full source). */
80 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
84 data->proplist = pa_proplist_new();
85 data->ports = pa_hashmap_new(pa_idxset_string_hash_func, pa_idxset_string_compare_func);
/* Set (copy) the sink name in the construction data. NOTE(review): the
 * extract is missing lines here — presumably the free of a previous
 * data->name before the strdup. */
90 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
94 data->name = pa_xstrdup(name);
/* Set the sample spec; a NULL spec clears the "is set" flag instead.
 * The assignment-inside-condition records whether a spec was supplied. */
97 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
100 if ((data->sample_spec_is_set = !!spec))
101 data->sample_spec = *spec;
/* Set the channel map; a NULL map clears the "is set" flag instead. */
104 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
107 if ((data->channel_map_is_set = !!map))
108 data->channel_map = *map;
/* Set the alternate sample rate (used when the default rate is not
 * suitable) and mark it as explicitly provided. */
111 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
114 data->alternate_sample_rate_is_set = TRUE;
115 data->alternate_sample_rate = alternate_sample_rate;
/* Set the initial volume; a NULL pointer clears the "is set" flag. */
118 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
121 if ((data->volume_is_set = !!volume))
122 data->volume = *volume;
/* Set the initial mute state; !! normalizes any nonzero value to 1. */
125 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
128 data->muted_is_set = TRUE;
129 data->muted = !!mute;
/* Set (copy) the requested active port name, freeing any previous one.
 * pa_xstrdup(NULL) is presumably NULL-safe, so a NULL port clears the
 * selection — TODO confirm. */
132 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
135 pa_xfree(data->active_port);
136 data->active_port = pa_xstrdup(port);
/* Release everything owned by the construction data: property list,
 * port hashmap (unreffing each pa_device_port), name and port strings.
 * NOTE(review): the extract is missing lines (likely NULL guards around
 * the proplist/ports frees). */
139 void pa_sink_new_data_done(pa_sink_new_data *data) {
142 pa_proplist_free(data->proplist);
145 pa_hashmap_free(data->ports, (pa_free_cb_t) pa_device_port_unref);
147 pa_xfree(data->name);
148 pa_xfree(data->active_port);
152 /* Called from main context */
/* Clear all implementor-supplied callbacks to a known (unset) state.
 * NOTE(review): gaps in this extract suggest additional callbacks
 * (e.g. set_state, get/set_mute, set_port) are also reset on the
 * missing lines — TODO confirm. */
153 static void reset_callbacks(pa_sink *s) {
157 s->get_volume = NULL;
158 s->set_volume = NULL;
159 s->write_volume = NULL;
162 s->request_rewind = NULL;
163 s->update_requested_latency = NULL;
165 s->get_formats = NULL;
166 s->set_formats = NULL;
167 s->update_rate = NULL;
170 /* Called from main context */
/* Construct a new pa_sink from validated construction data. Registers
 * the name, fires the NEW and FIXATE hooks, validates/defaults the
 * sample spec, channel map, volume and port, initializes both the
 * main-context and thread_info state, and creates the sink's monitor
 * source. Returns NULL on validation/hook failure.
 * NOTE(review): this extract is missing many lines (several variable
 * declarations, error-path returns, closing braces); comments below
 * describe only what is visible. */
171 pa_sink* pa_sink_new(
173 pa_sink_new_data *data,
174 pa_sink_flags_t flags) {
178 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
179 pa_source_new_data source_data;
185 pa_assert(data->name);
186 pa_assert_ctl_context();
188 s = pa_msgobject_new(pa_sink);
/* Reserve the sink name in the name registry before anything else. */
190 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
191 pa_log_debug("Failed to register name %s.", data->name);
196 pa_sink_new_data_set_name(data, name);
/* Modules may veto creation via the SINK_NEW hook. */
198 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
200 pa_namereg_unregister(core, name);
204 /* FIXME, need to free s here on failure */
/* Validate construction data; pa_return_null_if_fail() bails out with
 * NULL if a check fails. */
206 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
207 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
209 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
/* Fall back to a default channel map matching the channel count. */
211 if (!data->channel_map_is_set)
212 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
214 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
215 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
217 /* FIXME: There should probably be a general function for checking whether
218 * the sink volume is allowed to be set, like there is for sink inputs. */
219 pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* Default to full (norm) volume when none was supplied. */
221 if (!data->volume_is_set) {
222 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
223 data->save_volume = FALSE;
226 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
227 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
229 if (!data->muted_is_set)
/* Inherit card properties, then fill in description/icon/role props. */
233 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
235 pa_device_init_description(data->proplist);
236 pa_device_init_icon(data->proplist, TRUE);
237 pa_device_init_intended_roles(data->proplist);
/* Final chance for modules to adjust the data before it is fixated. */
239 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
241 pa_namereg_unregister(core, name);
245 s->parent.parent.free = sink_free;
246 s->parent.process_msg = pa_sink_process_msg;
249 s->state = PA_SINK_INIT;
252 s->suspend_cause = data->suspend_cause;
253 pa_sink_set_mixer_dirty(s, FALSE);
254 s->name = pa_xstrdup(name);
255 s->proplist = pa_proplist_copy(data->proplist);
256 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
257 s->module = data->module;
258 s->card = data->card;
260 s->priority = pa_device_init_priority(s->proplist);
262 s->sample_spec = data->sample_spec;
263 s->channel_map = data->channel_map;
264 s->default_sample_rate = s->sample_spec.rate;
/* Pick the per-sink alternate rate, falling back to the core default;
 * an alternate rate equal to the default is pointless, so drop it. */
266 if (data->alternate_sample_rate_is_set)
267 s->alternate_sample_rate = data->alternate_sample_rate;
269 s->alternate_sample_rate = s->core->alternate_sample_rate;
271 if (s->sample_spec.rate == s->alternate_sample_rate) {
272 pa_log_warn("Default and alternate sample rates are the same.");
273 s->alternate_sample_rate = 0;
276 s->inputs = pa_idxset_new(NULL, NULL);
278 s->input_to_master = NULL;
280 s->reference_volume = s->real_volume = data->volume;
281 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
282 s->base_volume = PA_VOLUME_NORM;
283 s->n_volume_steps = PA_VOLUME_NORM+1;
284 s->muted = data->muted;
285 s->refresh_volume = s->refresh_muted = FALSE;
292 /* As a minor optimization we just steal the list instead of
294 s->ports = data->ports;
297 s->active_port = NULL;
298 s->save_port = FALSE;
/* Honor the requested port if it exists; otherwise fall back to the
 * highest-priority port (loop body partially missing in this extract). */
300 if (data->active_port)
301 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
302 s->save_port = data->save_port;
304 if (!s->active_port) {
308 PA_HASHMAP_FOREACH(p, s->ports, state)
309 if (!s->active_port || p->priority > s->active_port->priority)
314 s->latency_offset = s->active_port->latency_offset;
316 s->latency_offset = 0;
318 s->save_volume = data->save_volume;
319 s->save_muted = data->save_muted;
321 pa_silence_memchunk_get(
322 &core->silence_cache,
/* Initialize the IO-thread-side mirror of the state set above. */
328 s->thread_info.rtpoll = NULL;
329 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
330 s->thread_info.soft_volume = s->soft_volume;
331 s->thread_info.soft_muted = s->muted;
332 s->thread_info.state = s->state;
333 s->thread_info.rewind_nbytes = 0;
334 s->thread_info.rewind_requested = FALSE;
335 s->thread_info.max_rewind = 0;
336 s->thread_info.max_request = 0;
337 s->thread_info.requested_latency_valid = FALSE;
338 s->thread_info.requested_latency = 0;
339 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
340 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
341 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
343 PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
344 s->thread_info.volume_changes_tail = NULL;
345 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
346 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
347 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
348 s->thread_info.latency_offset = s->latency_offset;
350 /* FIXME: This should probably be moved to pa_sink_put() */
351 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
354 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
356 pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
357 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
360 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
361 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Create the monitor source that mirrors this sink's output; it shares
 * our sample spec, channel map and latency-related flags. */
365 pa_source_new_data_init(&source_data);
366 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
367 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
368 pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
369 source_data.name = pa_sprintf_malloc("%s.monitor", name);
370 source_data.driver = data->driver;
371 source_data.module = data->module;
372 source_data.card = data->card;
374 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
375 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
376 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
378 s->monitor_source = pa_source_new(core, &source_data,
379 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
380 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
382 pa_source_new_data_done(&source_data);
384 if (!s->monitor_source) {
390 s->monitor_source->monitor_of = s;
/* Keep the monitor source's latency bounds in sync with ours. */
392 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
393 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
394 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
399 /* Called from main context */
/* Transition the sink to a new state. Notifies the implementor callback
 * (which may veto), tells the IO thread via an async message (rolling
 * back the callback on failure), fires the STATE_CHANGED hook and a
 * subscription event, and informs every sink input — killing those with
 * KILL_ON_SUSPEND when suspending. Returns 0 on success, negative on
 * failure. NOTE(review): the extract is missing lines (NULL guards for
 * s->set_state/s->asyncmsgq, the early return, closing braces). */
400 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
402 pa_bool_t suspend_change;
403 pa_sink_state_t original_state;
406 pa_assert_ctl_context();
408 if (s->state == state)
411 original_state = s->state;
/* True when the transition crosses the suspended/opened boundary in
 * either direction. */
414 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
415 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
418 if ((ret = s->set_state(s, state)) < 0)
422 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
/* IO thread rejected the state change: roll back the implementor. */
425 s->set_state(s, original_state);
432 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
433 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
434 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
437 if (suspend_change) {
441 /* We're suspending or resuming, tell everyone about it */
443 PA_IDXSET_FOREACH(i, s->inputs, idx)
444 if (s->state == PA_SINK_SUSPENDED &&
445 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
446 pa_sink_input_kill(i);
448 i->suspend(i, state == PA_SINK_SUSPENDED);
450 if (s->monitor_source)
451 pa_source_sync_suspend(s->monitor_source);
/* Install the implementor's get_volume callback (body lines missing
 * from this extract). */
457 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Install (or clear) the implementor's set_volume callback and toggle
 * PA_SINK_HW_VOLUME_CTRL accordingly; decibel-volume support is
 * re-evaluated when the callback is cleared. Emits a change event if
 * flags changed after init. NOTE(review): the extract is missing the
 * cb assignment, flags save, and if/else structure around the flag
 * updates. */
463 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
464 pa_sink_flags_t flags;
/* write_volume requires set_volume, so cb may not be cleared while
 * write_volume is installed. */
467 pa_assert(!s->write_volume || cb);
471 /* Save the current flags so we can tell if they've changed */
475 /* The sink implementor is responsible for setting decibel volume support */
476 s->flags |= PA_SINK_HW_VOLUME_CTRL;
478 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
479 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
480 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
483 /* If the flags have changed after init, let any clients know via a change event */
484 if (s->state != PA_SINK_INIT && flags != s->flags)
485 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install (or clear) the write_volume callback and toggle
 * PA_SINK_DEFERRED_VOLUME accordingly; a non-NULL cb requires that
 * set_volume is already installed. Emits a change event if flags
 * changed after init. */
488 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
489 pa_sink_flags_t flags;
492 pa_assert(!cb || s->set_volume);
494 s->write_volume = cb;
496 /* Save the current flags so we can tell if they've changed */
500 s->flags |= PA_SINK_DEFERRED_VOLUME;
502 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
504 /* If the flags have changed after init, let any clients know via a change event */
505 if (s->state != PA_SINK_INIT && flags != s->flags)
506 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install the implementor's get_mute callback (body lines missing from
 * this extract). */
509 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Install (or clear) the set_mute callback and toggle
 * PA_SINK_HW_MUTE_CTRL accordingly. Emits a change event if flags
 * changed after init. */
515 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
516 pa_sink_flags_t flags;
522 /* Save the current flags so we can tell if they've changed */
526 s->flags |= PA_SINK_HW_MUTE_CTRL;
528 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
530 /* If the flags have changed after init, let any clients know via a change event */
531 if (s->state != PA_SINK_INIT && flags != s->flags)
532 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable/disable PA_SINK_FLAT_VOLUME for this sink. The request is
 * additionally gated on the global core->flat_volumes preference.
 * Emits a change event if flags changed after init. */
535 static void enable_flat_volume(pa_sink *s, pa_bool_t enable) {
536 pa_sink_flags_t flags;
540 /* Always follow the overall user preference here */
541 enable = enable && s->core->flat_volumes;
543 /* Save the current flags so we can tell if they've changed */
547 s->flags |= PA_SINK_FLAT_VOLUME;
549 s->flags &= ~PA_SINK_FLAT_VOLUME;
551 /* If the flags have changed after init, let any clients know via a change event */
552 if (s->state != PA_SINK_INIT && flags != s->flags)
553 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable/disable PA_SINK_DECIBEL_VOLUME; flat volume is tied to it
 * (enabled together, disabled together). Emits a change event if flags
 * changed after init. */
556 void pa_sink_enable_decibel_volume(pa_sink *s, pa_bool_t enable) {
557 pa_sink_flags_t flags;
561 /* Save the current flags so we can tell if they've changed */
565 s->flags |= PA_SINK_DECIBEL_VOLUME;
566 enable_flat_volume(s, TRUE);
568 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
569 enable_flat_volume(s, FALSE);
572 /* If the flags have changed after init, let any clients know via a change event */
573 if (s->state != PA_SINK_INIT && flags != s->flags)
574 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
577 /* Called from main context */
/* Finish sink construction: sanity-check the flag/callback invariants
 * the implementor must have established between _new() and _put(),
 * finalize volume state (including volume sharing with a master sink),
 * move the sink to SUSPENDED or IDLE, publish the monitor source, and
 * announce the new sink via subscription event and hook. */
578 void pa_sink_put(pa_sink* s) {
579 pa_sink_assert_ref(s);
580 pa_assert_ctl_context();
582 pa_assert(s->state == PA_SINK_INIT);
583 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || s->input_to_master);
585 /* The following fields must be initialized properly when calling _put() */
586 pa_assert(s->asyncmsgq);
587 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
589 /* Generally, flags should be initialized via pa_sink_new(). As a
590 * special exception we allow some volume related flags to be set
591 * between _new() and _put() by the callback setter functions above.
593 * Thus we implement a couple safeguards here which ensure the above
594 * setters were used (or at least the implementor made manual changes
595 * in a compatible way).
597 * Note: All of these flags set here can change over the life time
599 pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
600 pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
601 pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
603 /* XXX: Currently decibel volume is disabled for all sinks that use volume
604 * sharing. When the master sink supports decibel volume, it would be good
605 * to have the flag also in the filter sink, but currently we don't do that
606 * so that the flags of the filter sink never change when it's moved from
607 * a master sink to another. One solution for this problem would be to
608 * remove user-visible volume altogether from filter sinks when volume
609 * sharing is used, but the current approach was easier to implement... */
610 /* We always support decibel volumes in software, otherwise we leave it to
611 * the sink implementor to set this flag as needed.
613 * Note: This flag can also change over the life time of the sink. */
614 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
615 pa_sink_enable_decibel_volume(s, TRUE);
617 /* If the sink implementor support DB volumes by itself, we should always
618 * try and enable flat volumes too */
619 if ((s->flags & PA_SINK_DECIBEL_VOLUME))
620 enable_flat_volume(s, TRUE);
622 if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
623 pa_sink *root_sink = pa_sink_get_master(s);
625 pa_assert(root_sink);
/* Volume-sharing sink: adopt the master's volumes, remapped to our
 * channel map. */
627 s->reference_volume = root_sink->reference_volume;
628 pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
630 s->real_volume = root_sink->real_volume;
631 pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
633 /* We assume that if the sink implementor changed the default
634 * volume he did so in real_volume, because that is the usual
635 * place where he is supposed to place his changes. */
636 s->reference_volume = s->real_volume;
638 s->thread_info.soft_volume = s->soft_volume;
639 s->thread_info.soft_muted = s->muted;
640 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
/* Cross-check latency/volume flags against the monitor source and
 * the thread_info state established so far. */
642 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
643 || (s->base_volume == PA_VOLUME_NORM
644 && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
645 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
646 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
647 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
648 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
650 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
651 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
652 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
/* Enter the initial state: SUSPENDED if a suspend cause is pending,
 * otherwise IDLE. */
654 if (s->suspend_cause)
655 pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED) == 0);
657 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
659 pa_source_put(s->monitor_source);
661 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
662 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
665 /* Called from main context */
/* Disconnect the sink from the core: fire the UNLINK hook, unregister
 * the name, remove it from the core/card idxsets, kill all remaining
 * inputs, move to the UNLINKED state, unlink the monitor source, and
 * post the REMOVE event. Idempotent by design (see comment below). */
666 void pa_sink_unlink(pa_sink* s) {
668 pa_sink_input *i, *j = NULL;
671 pa_assert_ctl_context();
673 /* Please note that pa_sink_unlink() does more than simply
674 * reversing pa_sink_put(). It also undoes the registrations
675 * already done in pa_sink_new()! */
677 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
678 * may be called multiple times on the same sink without bad
681 linked = PA_SINK_IS_LINKED(s->state);
684 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
686 if (s->state != PA_SINK_UNLINKED)
687 pa_namereg_unregister(s->core, s->name);
688 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
691 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill inputs until none are left. NOTE(review): the missing lines
 * presumably contain an infinite-loop guard using j — TODO confirm. */
693 while ((i = pa_idxset_first(s->inputs, NULL))) {
695 pa_sink_input_kill(i);
700 sink_set_state(s, PA_SINK_UNLINKED);
702 s->state = PA_SINK_UNLINKED;
706 if (s->monitor_source)
707 pa_source_unlink(s->monitor_source);
710 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
711 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
715 /* Called from main context */
/* Destructor installed as parent.parent.free in pa_sink_new(); runs
 * when the refcount drops to zero. Unlinks if still linked, then
 * releases the monitor source, input containers, silence block,
 * proplist and ports. NOTE(review): the extract is missing some frees
 * (e.g. name/driver strings) between the visible lines. */
716 static void sink_free(pa_object *o) {
717 pa_sink *s = PA_SINK(o);
720 pa_assert_ctl_context();
721 pa_assert(pa_sink_refcnt(s) == 0);
723 if (PA_SINK_IS_LINKED(s->state))
726 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
728 if (s->monitor_source) {
729 pa_source_unref(s->monitor_source);
730 s->monitor_source = NULL;
733 pa_idxset_free(s->inputs, NULL);
734 pa_hashmap_free(s->thread_info.inputs, (pa_free_cb_t) pa_sink_input_unref);
736 if (s->silence.memblock)
737 pa_memblock_unref(s->silence.memblock);
743 pa_proplist_free(s->proplist);
746 pa_hashmap_free(s->ports, (pa_free_cb_t) pa_device_port_unref);
751 /* Called from main context, and not while the IO thread is active, please */
/* Set the async message queue used to talk to the IO thread, and
 * propagate it to the monitor source. */
752 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
753 pa_sink_assert_ref(s);
754 pa_assert_ctl_context();
758 if (s->monitor_source)
759 pa_source_set_asyncmsgq(s->monitor_source, q);
762 /* Called from main context, and not while the IO thread is active, please */
/* Update a restricted subset of the sink's flags (only the LATENCY and
 * DYNAMIC_LATENCY bits may be changed) and mirror the change onto the
 * monitor source, translating sink flag bits to the corresponding
 * source flag bits.
 * BUG FIX: the last line previously passed PA_SINK_DYNAMIC_LATENCY into
 * the pa_source_flags_t value argument; sink and source DYNAMIC_LATENCY
 * flags have different bit values, so the monitor source received a
 * wrong flag. It must be PA_SOURCE_DYNAMIC_LATENCY, matching the mask
 * argument two lines above. */
763 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
764 pa_sink_assert_ref(s);
765 pa_assert_ctl_context();
770 /* For now, allow only a minimal set of flags to be changed. */
771 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
773 s->flags = (s->flags & ~mask) | (value & mask);
775 pa_source_update_flags(s->monitor_source,
776 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
777 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
778 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
779 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
782 /* Called from IO context, or before _put() from main context */
/* Set the rtpoll object the IO thread runs on, and propagate it to the
 * monitor source. */
783 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
784 pa_sink_assert_ref(s);
785 pa_sink_assert_io_context(s);
787 s->thread_info.rtpoll = p;
789 if (s->monitor_source)
790 pa_source_set_rtpoll(s->monitor_source, p);
793 /* Called from main context */
/* Re-evaluate IDLE vs RUNNING based on whether anything uses the sink.
 * A suspended sink is left alone (the early-return body is missing from
 * this extract). Returns the sink_set_state() result. */
794 int pa_sink_update_status(pa_sink*s) {
795 pa_sink_assert_ref(s);
796 pa_assert_ctl_context();
797 pa_assert(PA_SINK_IS_LINKED(s->state));
799 if (s->state == PA_SINK_SUSPENDED)
802 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
805 /* Called from any context - must be threadsafe */
/* Atomically mark the hardware mixer settings as needing a refresh
 * (consumed by pa_sink_suspend() when the device becomes accessible). */
806 void pa_sink_set_mixer_dirty(pa_sink *s, pa_bool_t is_dirty)
808 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
811 /* Called from main context */
/* Add or remove a suspend cause (a nonzero bit in pa_suspend_cause_t)
 * on the sink and its monitor source, then transition to SUSPENDED,
 * RUNNING or IDLE as appropriate. If the mixer was marked dirty and the
 * device session becomes accessible again, re-applies the active port
 * (via message when deferred volume is in use, else directly). Returns
 * the sink_set_state() result. */
812 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
813 pa_sink_assert_ref(s);
814 pa_assert_ctl_context();
815 pa_assert(PA_SINK_IS_LINKED(s->state));
816 pa_assert(cause != 0);
819 s->suspend_cause |= cause;
820 s->monitor_source->suspend_cause |= cause;
822 s->suspend_cause &= ~cause;
823 s->monitor_source->suspend_cause &= ~cause;
826 if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
827 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
828 it'll be handled just fine. */
829 pa_sink_set_mixer_dirty(s, FALSE);
830 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
831 if (s->active_port && s->set_port) {
832 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
833 struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
834 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
837 s->set_port(s, s->active_port);
/* No state change needed if suspended-ness already matches the
 * remaining suspend causes (the early-return body is missing from
 * this extract). */
847 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
850 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
852 if (s->suspend_cause)
853 return sink_set_state(s, PA_SINK_SUSPENDED);
855 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
858 /* Called from main context */
/* Begin moving all of this sink's inputs away: for each input that
 * successfully starts its move, a reference is kept and it is
 * (presumably, on a missing line) pushed onto queue q; inputs that
 * fail keep running and are unreffed. Returns the queue. */
859 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
860 pa_sink_input *i, *n;
863 pa_sink_assert_ref(s);
864 pa_assert_ctl_context();
865 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the next input before acting on the current one, since a
 * started move removes i from s->inputs. */
870 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
871 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
873 pa_sink_input_ref(i);
875 if (pa_sink_input_start_move(i) >= 0)
878 pa_sink_input_unref(i);
884 /* Called from main context */
/* Finish a move started with pa_sink_move_all_start(): reattach each
 * queued input to sink s, failing the move (input gets killed/moved by
 * its fail handler) when finish_move() reports an error. Consumes and
 * frees the queue, dropping the per-input references taken earlier. */
885 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
888 pa_sink_assert_ref(s);
889 pa_assert_ctl_context();
890 pa_assert(PA_SINK_IS_LINKED(s->state));
893 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
894 if (pa_sink_input_finish_move(i, s, save) < 0)
895 pa_sink_input_fail_move(i);
897 pa_sink_input_unref(i);
900 pa_queue_free(q, NULL);
903 /* Called from main context */
/* Abort a move started with pa_sink_move_all_start(): fail every queued
 * input's move, drop the references, and free the queue. */
904 void pa_sink_move_all_fail(pa_queue *q) {
907 pa_assert_ctl_context();
910 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
911 pa_sink_input_fail_move(i);
912 pa_sink_input_unref(i);
915 pa_queue_free(q, NULL);
918 /* Called from IO thread context */
/* Execute a rewind of nbytes in the playback buffer: clears the pending
 * rewind request, rewinds the deferred hardware volume queue, then
 * propagates the rewind to every input and to the monitor source.
 * NOTE(review): nbytes is size_t, so "nbytes <= 0" is effectively
 * "nbytes == 0" — harmless, but the comparison reads oddly. */
919 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
923 pa_sink_assert_ref(s);
924 pa_sink_assert_io_context(s);
925 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
927 /* If nobody requested this and this is actually no real rewind
928 * then we can short cut this. Please note that this means that
929 * not all rewind requests triggered upstream will always be
930 * translated in actual requests! */
931 if (!s->thread_info.rewind_requested && nbytes <= 0)
934 s->thread_info.rewind_nbytes = 0;
935 s->thread_info.rewind_requested = FALSE;
938 pa_log_debug("Processing rewind...");
939 if (s->flags & PA_SINK_DEFERRED_VOLUME)
940 pa_sink_volume_change_rewind(s, nbytes);
943 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
944 pa_sink_input_assert_ref(i);
945 pa_sink_input_process_rewind(i, nbytes);
949 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
950 pa_source_process_rewind(s->monitor_source, nbytes);
954 /* Called from IO thread context */
/* Collect up to maxinfo chunks from the sink's inputs into the info
 * array for mixing. Peeks each input, tracks the minimum chunk length
 * in *length, and skips chunks that are pure silence (dropping the
 * block reference). Each kept entry holds a reference to its input in
 * info->userdata. Returns the number of entries filled (the counting
 * and return lines are missing from this extract — TODO confirm). */
955 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
959 size_t mixlength = *length;
961 pa_sink_assert_ref(s);
962 pa_sink_assert_io_context(s);
965 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
966 pa_sink_input_assert_ref(i);
968 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
970 if (mixlength == 0 || info->chunk.length < mixlength)
971 mixlength = info->chunk.length;
973 if (pa_memblock_is_silence(info->chunk.memblock)) {
974 pa_memblock_unref(info->chunk.memblock);
978 info->userdata = pa_sink_input_ref(i);
980 pa_assert(info->chunk.memblock);
981 pa_assert(info->chunk.length > 0);
994 /* Called from IO thread context */
/* After a render pass, advance every input by result->length, forward
 * per-input audio to any direct source outputs (with the input's volume
 * applied), release the references and chunks held in the info array,
 * and post the mixed result to the monitor source. */
995 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
999 unsigned n_unreffed = 0;
1001 pa_sink_assert_ref(s);
1002 pa_sink_assert_io_context(s);
1004 pa_assert(result->memblock);
1005 pa_assert(result->length > 0);
1007 /* We optimize for the case where the order of the inputs has not changed */
1009 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1011 pa_mix_info* m = NULL;
1013 pa_sink_input_assert_ref(i);
1015 /* Let's try to find the matching entry info the pa_mix_info array */
1016 for (j = 0; j < n; j ++) {
1018 if (info[p].userdata == i) {
1028 /* Drop read data */
1029 pa_sink_input_drop(i, result->length);
1031 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
1033 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1034 void *ostate = NULL;
1035 pa_source_output *o;
/* If we have this input's own chunk, apply its volume before
 * posting; otherwise (missing else-branch lines) the rendered
 * result is used as-is. */
1038 if (m && m->chunk.memblock) {
1040 pa_memblock_ref(c.memblock);
1041 pa_assert(result->length <= c.length);
1042 c.length = result->length;
1044 pa_memchunk_make_writable(&c, 0);
1045 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
1048 pa_memblock_ref(c.memblock);
1049 pa_assert(result->length <= c.length);
1050 c.length = result->length;
1053 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1054 pa_source_output_assert_ref(o);
1055 pa_assert(o->direct_on_input == i);
1056 pa_source_post_direct(s->monitor_source, o, &c);
1059 pa_memblock_unref(c.memblock);
/* Release this entry's chunk and the input reference taken by
 * fill_mix_info(). */
1064 if (m->chunk.memblock)
1065 pa_memblock_unref(m->chunk.memblock);
1066 pa_memchunk_reset(&m->chunk);
1068 pa_sink_input_unref(m->userdata);
1075 /* Now drop references to entries that are included in the
1076 * pa_mix_info array but don't exist anymore */
1078 if (n_unreffed < n) {
1079 for (; n > 0; info++, n--) {
1081 pa_sink_input_unref(info->userdata);
1082 if (info->chunk.memblock)
1083 pa_memblock_unref(info->chunk.memblock);
1087 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1088 pa_source_post(s->monitor_source, result);
1091 /* Called from IO thread context */
/* Render up to `length` bytes of audio into *result (a new or shared
 * memchunk). Suspended sinks return silence. With no active inputs
 * (n == 0) the silence cache is used; with exactly one input its chunk
 * is passed through with soft volume/mute applied (copy-on-write); with
 * several inputs they are mixed via pa_mix() into a fresh block.
 * Finishes by calling inputs_drop() to advance the inputs. */
1092 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1093 pa_mix_info info[MAX_MIX_CHANNELS];
1095 size_t block_size_max;
1097 pa_sink_assert_ref(s);
1098 pa_sink_assert_io_context(s);
1099 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1100 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1103 pa_assert(!s->thread_info.rewind_requested);
1104 pa_assert(s->thread_info.rewind_nbytes == 0);
1106 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1107 result->memblock = pa_memblock_ref(s->silence.memblock);
1108 result->index = s->silence.index;
1109 result->length = PA_MIN(s->silence.length, length);
/* A zero length request defaults to one frame-aligned mix buffer;
 * cap everything at the mempool's maximum block size. */
1116 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1118 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1119 if (length > block_size_max)
1120 length = pa_frame_align(block_size_max, &s->sample_spec);
1122 pa_assert(length > 0);
1124 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: nothing to play, hand out (a reference to) silence. */
1128 *result = s->silence;
1129 pa_memblock_ref(result->memblock);
1131 if (result->length > length)
1132 result->length = length;
1134 } else if (n == 1) {
/* Single input: share its block and apply soft volume/mute lazily. */
1137 *result = info[0].chunk;
1138 pa_memblock_ref(result->memblock);
1140 if (result->length > length)
1141 result->length = length;
1143 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1145 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1146 pa_memblock_unref(result->memblock);
1147 pa_silence_memchunk_get(&s->core->silence_cache,
1152 } else if (!pa_cvolume_is_norm(&volume)) {
1153 pa_memchunk_make_writable(result, 0);
1154 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* Multiple inputs: mix into a freshly allocated block. */
1158 result->memblock = pa_memblock_new(s->core->mempool, length);
1160 ptr = pa_memblock_acquire(result->memblock);
1161 result->length = pa_mix(info, n,
1164 &s->thread_info.soft_volume,
1165 s->thread_info.soft_muted);
1166 pa_memblock_release(result->memblock);
1171 inputs_drop(s, info, n, result);
1176 /* Called from IO thread context */
/* Like pa_sink_render(), but renders into the caller-supplied chunk
 * *target (possibly shortening target->length). Suspended sinks and the
 * no-input case write silence; one input is volume-adjusted and copied
 * in; several inputs are mixed directly into the target block. */
1177 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1178 pa_mix_info info[MAX_MIX_CHANNELS];
1180 size_t length, block_size_max;
1182 pa_sink_assert_ref(s);
1183 pa_sink_assert_io_context(s);
1184 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1186 pa_assert(target->memblock);
1187 pa_assert(target->length > 0);
1188 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1190 pa_assert(!s->thread_info.rewind_requested);
1191 pa_assert(s->thread_info.rewind_nbytes == 0);
1193 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1194 pa_silence_memchunk(target, &s->sample_spec);
1200 length = target->length;
1201 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1202 if (length > block_size_max)
1203 length = pa_frame_align(block_size_max, &s->sample_spec);
1205 pa_assert(length > 0);
1207 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: fill the target with silence. */
1210 if (target->length > length)
1211 target->length = length;
1213 pa_silence_memchunk(target, &s->sample_spec);
1214 } else if (n == 1) {
1217 if (target->length > length)
1218 target->length = length;
1220 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1222 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1223 pa_silence_memchunk(target, &s->sample_spec);
/* Single audible input: copy its (volume-adjusted, copy-on-write)
 * chunk into the target. */
1227 vchunk = info[0].chunk;
1228 pa_memblock_ref(vchunk.memblock);
1230 if (vchunk.length > length)
1231 vchunk.length = length;
1233 if (!pa_cvolume_is_norm(&volume)) {
1234 pa_memchunk_make_writable(&vchunk, 0);
1235 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1238 pa_memchunk_memcpy(target, &vchunk);
1239 pa_memblock_unref(vchunk.memblock);
/* Multiple inputs: mix straight into the target block. */
1245 ptr = pa_memblock_acquire(target->memblock);
1247 target->length = pa_mix(info, n,
1248 (uint8_t*) ptr + target->index, length,
1250 &s->thread_info.soft_volume,
1251 s->thread_info.soft_muted);
1253 pa_memblock_release(target->memblock);
1256 inputs_drop(s, info, n, target);
1261 /* Called from IO thread context */
/* Like pa_sink_render_into(), but guarantees the whole 'target' chunk
 * is filled. NOTE(review): the loop that repeatedly renders sub-chunks
 * is mostly not visible in this listing (lines 1279-1290 missing);
 * code kept verbatim. */
1262 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1266 pa_sink_assert_ref(s);
1267 pa_sink_assert_io_context(s);
1268 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1270 pa_assert(target->memblock);
1271 pa_assert(target->length > 0);
1272 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1274 pa_assert(!s->thread_info.rewind_requested);
1275 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended sinks render silence. */
1277 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1278 pa_silence_memchunk(target, &s->sample_spec);
1291 pa_sink_render_into(s, &chunk);
1300 /* Called from IO thread context */
/* Render exactly 'length' bytes into 'result': render what we can in
 * one pass, then top up the remainder in place via
 * pa_sink_render_into_full(). */
1301 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1302 pa_sink_assert_ref(s);
1303 pa_sink_assert_io_context(s);
1304 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1305 pa_assert(length > 0);
1306 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1309 pa_assert(!s->thread_info.rewind_requested);
1310 pa_assert(s->thread_info.rewind_nbytes == 0);
1314 pa_sink_render(s, length, result);
/* Short render: grow the chunk and fill the tail behind the data we got. */
1316 if (result->length < length) {
1319 pa_memchunk_make_writable(result, length);
1321 chunk.memblock = result->memblock;
1322 chunk.index = result->index + result->length;
1323 chunk.length = length - result->length;
1325 pa_sink_render_into_full(s, &chunk);
1327 result->length = length;
1333 /* Called from main thread */
/* Try to switch the sink's sample rate to 'rate'. When not in
 * passthrough mode the request is snapped to the configured default or
 * alternate rate (whichever shares the requested rate's family);
 * passthrough uses the stream rate as-is. Refused while the sink or
 * its monitor source is RUNNING.
 * NOTE(review): several lines (returns, else branches, closing braces)
 * are not visible in this listing; code kept verbatim. */
1334 pa_bool_t pa_sink_update_rate(pa_sink *s, uint32_t rate, pa_bool_t passthrough)
1336 if (s->update_rate) {
1337 uint32_t desired_rate = rate;
1338 uint32_t default_rate = s->default_sample_rate;
1339 uint32_t alternate_rate = s->alternate_sample_rate;
1342 pa_bool_t use_alternate = FALSE;
1344 if (PA_UNLIKELY(default_rate == alternate_rate)) {
1345 pa_log_warn("Default and alternate sample rates are the same.");
/* Cannot change the rate underneath an actively running sink. */
1349 if (PA_SINK_IS_RUNNING(s->state)) {
1350 pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
1351 s->sample_spec.rate);
1355 if (s->monitor_source) {
1356 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == TRUE) {
1357 pa_log_info("Cannot update rate, monitor source is RUNNING");
/* Sanity-limit the requested rate. */
1362 if (PA_UNLIKELY (desired_rate < 8000 ||
1363 desired_rate > PA_RATE_MAX))
/* default/alternate must come from different rate families
 * (multiples of 4000 Hz vs. multiples of 11025 Hz). */
1367 pa_assert(default_rate % 4000 || default_rate % 11025);
1368 pa_assert(alternate_rate % 4000 || alternate_rate % 11025);
1370 if (default_rate % 4000) {
1371 /* default is a 11025 multiple */
1372 if ((alternate_rate % 4000 == 0) && (desired_rate % 4000 == 0))
1375 /* default is 4000 multiple */
1376 if ((alternate_rate % 11025 == 0) && (desired_rate % 11025 == 0))
1381 desired_rate = alternate_rate;
1383 desired_rate = default_rate;
1385 desired_rate = rate; /* use stream sampling rate, discard default/alternate settings */
/* Nothing to do if the rate is already in effect. */
1388 if (desired_rate == s->sample_spec.rate)
/* Don't disturb other active (non-corked) streams unless passthrough. */
1391 if (!passthrough && pa_sink_used_by(s) > 0)
1394 pa_log_debug("Suspending sink %s due to changing the sample rate.", s->name);
1395 pa_sink_suspend(s, TRUE, PA_SUSPEND_IDLE); /* needed before rate update, will be resumed automatically */
1397 if (s->update_rate(s, desired_rate) == TRUE) {
1398 /* update monitor source as well */
1399 if (s->monitor_source && !passthrough)
1400 pa_source_update_rate(s->monitor_source, desired_rate, FALSE);
1401 pa_log_info("Changed sampling rate successfully");
/* Re-prepare corked streams so they match the new rate when uncorked. */
1403 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1404 if (i->state == PA_SINK_INPUT_CORKED)
1405 pa_sink_input_update_rate(i);
1414 /* Called from main thread */
/* Query the current playback latency from the IO thread (synchronous
 * message round-trip) and apply the configured latency offset.
 * Presumably returns 0 when suspended or when the sink does not report
 * latency -- the early returns are not visible in this listing. */
1415 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1418 pa_sink_assert_ref(s);
1419 pa_assert_ctl_context();
1420 pa_assert(PA_SINK_IS_LINKED(s->state));
1422 /* The returned value is supposed to be in the time domain of the sound card! */
1424 if (s->state == PA_SINK_SUSPENDED)
1427 if (!(s->flags & PA_SINK_LATENCY))
/* Synchronously ask the IO thread for the device latency. */
1430 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1432 /* usec is unsigned, so check that the offset can be added to usec without
/* Guard against underflow: only apply a negative offset that fits. */
1434 if (-s->latency_offset <= (int64_t) usec)
1435 usec += s->latency_offset;
1442 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): calls process_msg()
 * directly instead of doing an asyncmsgq round-trip, and uses the
 * thread_info copy of the latency offset. */
1443 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1447 pa_sink_assert_ref(s);
1448 pa_sink_assert_io_context(s);
1449 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1451 /* The returned value is supposed to be in the time domain of the sound card! */
1453 if (s->thread_info.state == PA_SINK_SUSPENDED)
1456 if (!(s->flags & PA_SINK_LATENCY))
1459 o = PA_MSGOBJECT(s);
1461 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1463 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1466 /* usec is unsigned, so check that the offset can be added to usec without
/* Guard against underflow: only apply a negative offset that fits. */
1468 if (-s->thread_info.latency_offset <= (int64_t) usec)
1469 usec += s->thread_info.latency_offset;
1476 /* Called from the main thread (and also from the IO thread while the main
1477 * thread is waiting).
1479 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1480 * set. Instead, flat volume mode is detected by checking whether the root sink
1481 * has the flag set. */
1482 pa_bool_t pa_sink_flat_volume_enabled(pa_sink *s) {
1483 pa_sink_assert_ref(s);
/* Resolve volume sharing: check the flag on the root sink, not 's'. */
1485 s = pa_sink_get_master(s);
1488 return (s->flags & PA_SINK_FLAT_VOLUME);
1493 /* Called from the main thread (and also from the IO thread while the main
1494 * thread is waiting). */
/* Walk up the chain of volume-sharing filter sinks and return the root
 * sink (the one that owns the real device volume). */
1495 pa_sink *pa_sink_get_master(pa_sink *s) {
1496 pa_sink_assert_ref(s);
1498 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* A sharing sink with no master connection: chain is broken here. */
1499 if (PA_UNLIKELY(!s->input_to_master))
1502 s = s->input_to_master->sink;
1508 /* Called from main context */
/* A sink is in passthrough mode iff its single connected input is a
 * passthrough stream. */
1509 pa_bool_t pa_sink_is_passthrough(pa_sink *s) {
1510 pa_sink_input *alt_i;
1513 pa_sink_assert_ref(s);
1515 /* one and only one PASSTHROUGH input can possibly be connected */
1516 if (pa_idxset_size(s->inputs) == 1) {
1517 alt_i = pa_idxset_first(s->inputs, &idx);
1519 if (pa_sink_input_is_passthrough(alt_i))
1526 /* Called from main context */
/* Prepare the sink for a passthrough stream: suspend the monitor
 * source and force the volume to 0 dB (saving the previous volume so
 * pa_sink_leave_passthrough() can restore it). */
1527 void pa_sink_enter_passthrough(pa_sink *s) {
1530 /* disable the monitor in passthrough mode */
1531 if (s->monitor_source) {
1532 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1533 pa_source_suspend(s->monitor_source, TRUE, PA_SUSPEND_PASSTHROUGH);
1536 /* set the volume to NORM */
1537 s->saved_volume = *pa_sink_get_volume(s, TRUE);
1538 s->saved_save_volume = s->save_volume;
/* Cap at base_volume so hardware amplification is not engaged. */
1540 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1541 pa_sink_set_volume(s, &volume, TRUE, FALSE);
1544 /* Called from main context */
/* Undo pa_sink_enter_passthrough(): resume the monitor source and
 * restore the volume that was saved when passthrough was entered. */
1545 void pa_sink_leave_passthrough(pa_sink *s) {
1546 /* Unsuspend monitor */
1547 if (s->monitor_source) {
1548 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1549 pa_source_suspend(s->monitor_source, FALSE, PA_SUSPEND_PASSTHROUGH);
1552 /* Restore sink volume to what it was before we entered passthrough mode */
1553 pa_sink_set_volume(s, &s->saved_volume, TRUE, s->saved_save_volume);
/* Clear the saved state so it cannot be restored twice. */
1555 pa_cvolume_init(&s->saved_volume);
1556 s->saved_save_volume = FALSE;
1559 /* Called from main context. */
/* Recompute one input's reference ratio, i.e. the per-channel software
 * ratio between the stream volume and the sink's reference volume
 * (flat-volume mode only). */
1560 static void compute_reference_ratio(pa_sink_input *i) {
1562 pa_cvolume remapped;
1565 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1568 * Calculates the reference ratio from the sink's reference
1569 * volume. This basically calculates:
1571 * i->reference_ratio = i->volume / i->sink->reference_volume
/* Bring the sink volume into the input's channel map first. */
1574 remapped = i->sink->reference_volume;
1575 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1577 i->reference_ratio.channels = i->sample_spec.channels;
1579 for (c = 0; c < i->sample_spec.channels; c++) {
1581 /* We don't update when the sink volume is 0 anyway */
1582 if (remapped.values[c] <= PA_VOLUME_MUTED)
1585 /* Don't update the reference ratio unless necessary */
1586 if (pa_sw_volume_multiply(
1587 i->reference_ratio.values[c],
1588 remapped.values[c]) == i->volume.values[c])
1591 i->reference_ratio.values[c] = pa_sw_volume_divide(
1592 i->volume.values[c],
1593 remapped.values[c]);
1597 /* Called from main context. Only called for the root sink in volume sharing
1598 * cases, except for internal recursive calls. */
/* Recompute the reference ratio of every input on this sink, recursing
 * into volume-sharing filter sinks below it. */
1599 static void compute_reference_ratios(pa_sink *s) {
1603 pa_sink_assert_ref(s);
1604 pa_assert_ctl_context();
1605 pa_assert(PA_SINK_IS_LINKED(s->state));
1606 pa_assert(pa_sink_flat_volume_enabled(s));
1608 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1609 compute_reference_ratio(i);
/* Descend into filter sinks that share their volume with us. */
1611 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1612 compute_reference_ratios(i->origin_sink);
1616 /* Called from main context. Only called for the root sink in volume sharing
1617 * cases, except for internal recursive calls. */
/* Recompute each input's real ratio (stream volume relative to the
 * sink's real volume) and from it the soft volume that the mixer will
 * actually apply. Recurses into volume-sharing filter sinks. */
1618 static void compute_real_ratios(pa_sink *s) {
1622 pa_sink_assert_ref(s);
1623 pa_assert_ctl_context();
1624 pa_assert(PA_SINK_IS_LINKED(s->state));
1625 pa_assert(pa_sink_flat_volume_enabled(s));
1627 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1629 pa_cvolume remapped;
1631 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1632 /* The origin sink uses volume sharing, so this input's real ratio
1633 * is handled as a special case - the real ratio must be 0 dB, and
1634 * as a result i->soft_volume must equal i->volume_factor. */
1635 pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1636 i->soft_volume = i->volume_factor;
1638 compute_real_ratios(i->origin_sink);
1644 * This basically calculates:
1646 * i->real_ratio := i->volume / s->real_volume
1647 * i->soft_volume := i->real_ratio * i->volume_factor
/* Bring the sink's real volume into the input's channel map. */
1650 remapped = s->real_volume;
1651 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1653 i->real_ratio.channels = i->sample_spec.channels;
1654 i->soft_volume.channels = i->sample_spec.channels;
1656 for (c = 0; c < i->sample_spec.channels; c++) {
1658 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1659 /* We leave i->real_ratio untouched */
1660 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1664 /* Don't lose accuracy unless necessary */
1665 if (pa_sw_volume_multiply(
1666 i->real_ratio.values[c],
1667 remapped.values[c]) != i->volume.values[c])
1669 i->real_ratio.values[c] = pa_sw_volume_divide(
1670 i->volume.values[c],
1671 remapped.values[c]);
1673 i->soft_volume.values[c] = pa_sw_volume_multiply(
1674 i->real_ratio.values[c],
1675 i->volume_factor.values[c]);
1678 /* We don't copy the soft_volume to the thread_info data
1679 * here. That must be done by the caller */
/* Remap volume 'v' from channel map 'from' to 'to' while minimizing
 * cross-stream impact; see the long comment in the body for the exact
 * policy. Returns 'v' (modified in place). */
1683 static pa_cvolume *cvolume_remap_minimal_impact(
1685 const pa_cvolume *template,
1686 const pa_channel_map *from,
1687 const pa_channel_map *to) {
1692 pa_assert(template);
1695 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1696 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1698 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1699 * mapping from sink input to sink volumes:
1701 * If template is a possible remapping from v it is used instead
1702 * of remapping anew.
1704 * If the channel maps don't match we set an all-channel volume on
1705 * the sink to ensure that changing a volume on one stream has no
1706 * effect that cannot be compensated for in another stream that
1707 * does not have the same channel map as the sink. */
1709 if (pa_channel_map_equal(from, to))
/* Template round-trips back to 'v' -> reuse it verbatim. */
1713 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
/* Fallback: flatten to the maximum channel value on all channels. */
1718 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1722 /* Called from main thread. Only called for the root sink in volume sharing
1723 * cases, except for internal recursive calls. */
/* Merge the per-channel maximum of all input volumes (recursing into
 * volume-sharing filter sinks) into '*max_volume', remapped into
 * 'channel_map'. */
1724 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1728 pa_sink_assert_ref(s);
1729 pa_assert(max_volume);
1730 pa_assert(channel_map);
1731 pa_assert(pa_sink_flat_volume_enabled(s));
1733 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1734 pa_cvolume remapped;
1736 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1737 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1739 /* Ignore this input. The origin sink uses volume sharing, so this
1740 * input's volume will be set to be equal to the root sink's real
1741 * volume. Obviously this input's current volume must not then
1742 * affect what the root sink's real volume will be. */
1746 remapped = i->volume;
1747 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1748 pa_cvolume_merge(max_volume, max_volume, &remapped);
1752 /* Called from main thread. Only called for the root sink in volume sharing
1753 * cases, except for internal recursive calls. */
/* TRUE iff this sink, or any volume-sharing filter sink below it, has
 * at least one "real" (non-sharing) input attached. */
1754 static pa_bool_t has_inputs(pa_sink *s) {
1758 pa_sink_assert_ref(s);
1760 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1761 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1768 /* Called from main thread. Only called for the root sink in volume sharing
1769 * cases, except for internal recursive calls. */
/* Set s->real_volume to 'new_volume' (remapped into the sink's channel
 * map) and propagate it down through volume-sharing filter sinks,
 * updating their inputs' volumes and notifying listeners of changes. */
1770 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1774 pa_sink_assert_ref(s);
1775 pa_assert(new_volume);
1776 pa_assert(channel_map);
1778 s->real_volume = *new_volume;
1779 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1781 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1782 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1783 if (pa_sink_flat_volume_enabled(s)) {
1784 pa_cvolume old_volume = i->volume;
1786 /* Follow the root sink's real volume. */
1787 i->volume = *new_volume;
1788 pa_cvolume_remap(&i->volume, channel_map, &i->channel_map);
1789 compute_reference_ratio(i);
1791 /* The volume changed, let's tell people so */
1792 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1793 if (i->volume_changed)
1794 i->volume_changed(i);
1796 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
/* Recurse into the filter sink fed by this input. */
1800 update_real_volume(i->origin_sink, new_volume, channel_map);
1805 /* Called from main thread. Only called for the root sink in shared volume
/* Derive s->real_volume from the maximum of all connected stream
 * volumes (flat-volume mode), then refresh every input's real
 * ratio/soft volume. With no inputs, the reference volume is kept. */
1807 static void compute_real_volume(pa_sink *s) {
1808 pa_sink_assert_ref(s);
1809 pa_assert_ctl_context();
1810 pa_assert(PA_SINK_IS_LINKED(s->state));
1811 pa_assert(pa_sink_flat_volume_enabled(s));
1812 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
1814 /* This determines the maximum volume of all streams and sets
1815 * s->real_volume accordingly. */
1817 if (!has_inputs(s)) {
1818 /* In the special case that we have no sink inputs we leave the
1819 * volume unmodified. */
1820 update_real_volume(s, &s->reference_volume, &s->channel_map);
/* Start from silence and merge every input's volume into it. */
1824 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1826 /* First let's determine the new maximum volume of all inputs
1827 * connected to this sink */
1828 get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
1829 update_real_volume(s, &s->real_volume, &s->channel_map);
1831 /* Then, let's update the real ratios/soft volumes of all inputs
1832 * connected to this sink */
1833 compute_real_ratios(s);
1836 /* Called from main thread. Only called for the root sink in shared volume
1837 * cases, except for internal recursive calls. */
/* After a sink (not stream) volume change, recompute every input's
 * volume as reference_volume * reference_ratio and notify listeners.
 * Recurses into volume-sharing filter sinks. */
1838 static void propagate_reference_volume(pa_sink *s) {
1842 pa_sink_assert_ref(s);
1843 pa_assert_ctl_context();
1844 pa_assert(PA_SINK_IS_LINKED(s->state));
1845 pa_assert(pa_sink_flat_volume_enabled(s));
1847 /* This is called whenever the sink volume changes that is not
1848 * caused by a sink input volume change. We need to fix up the
1849 * sink input volumes accordingly */
1851 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1852 pa_cvolume old_volume;
1854 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1855 propagate_reference_volume(i->origin_sink);
1857 /* Since the origin sink uses volume sharing, this input's volume
1858 * needs to be updated to match the root sink's real volume, but
1859 * that will be done later in update_shared_real_volume(). */
1863 old_volume = i->volume;
1865 /* This basically calculates:
1867 * i->volume := s->reference_volume * i->reference_ratio */
1869 i->volume = s->reference_volume;
1870 pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
1871 pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);
1873 /* The volume changed, let's tell people so */
1874 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1876 if (i->volume_changed)
1877 i->volume_changed(i);
1879 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1884 /* Called from main thread. Only called for the root sink in volume sharing
1885 * cases, except for internal recursive calls. The return value indicates
1886 * whether any reference volume actually changed. */
/* Set s->reference_volume to 'v' (remapped from 'channel_map'), fire a
 * change event if it actually changed, and push the same volume down
 * to all volume-sharing filter sinks. */
1887 static pa_bool_t update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
1889 pa_bool_t reference_volume_changed;
1893 pa_sink_assert_ref(s);
1894 pa_assert(PA_SINK_IS_LINKED(s->state));
1896 pa_assert(channel_map);
1897 pa_assert(pa_cvolume_valid(v));
1900 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
1902 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
1903 s->reference_volume = volume;
/* Keep the save flag sticky: a no-op change must not clear it. */
1905 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
1907 if (reference_volume_changed)
1908 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1909 else if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1910 /* If the root sink's volume doesn't change, then there can't be any
1911 * changes in the other sinks in the sink tree either.
1913 * It's probably theoretically possible that even if the root sink's
1914 * volume changes slightly, some filter sink doesn't change its volume
1915 * due to rounding errors. If that happens, we still want to propagate
1916 * the changed root sink volume to the sinks connected to the
1917 * intermediate sink that didn't change its volume. This theoretical
1918 * possibility is the reason why we have that !(s->flags &
1919 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
1920 * notice even if we returned here FALSE always if
1921 * reference_volume_changed is FALSE. */
/* Recurse into filter sinks; their changes are never saved (FALSE). */
1924 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1925 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1926 update_reference_volume(i->origin_sink, v, channel_map, FALSE);
1932 /* Called from main thread */
/* Public volume setter. With a non-NULL 'volume', updates the (root)
 * sink's reference volume and, in flat-volume mode, propagates it to
 * the streams and recomputes the real volume. With volume == NULL,
 * re-synchronizes the sink's reference/real volumes from the current
 * stream volumes instead. Finally hands the result to the backend via
 * set_volume() or soft volume, and kicks the IO thread.
 * NOTE(review): some lines (declarations, returns, else keywords) are
 * not visible in this listing; code kept verbatim. */
1933 void pa_sink_set_volume(
1935 const pa_cvolume *volume,
1939 pa_cvolume new_reference_volume;
1942 pa_sink_assert_ref(s);
1943 pa_assert_ctl_context();
1944 pa_assert(PA_SINK_IS_LINKED(s->state));
1945 pa_assert(!volume || pa_cvolume_valid(volume));
1946 pa_assert(volume || pa_sink_flat_volume_enabled(s));
1947 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1949 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
1950 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
1951 if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
1952 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input")
1956 /* In case of volume sharing, the volume is set for the root sink first,
1957 * from which it's then propagated to the sharing sinks. */
1958 root_sink = pa_sink_get_master(s);
1960 if (PA_UNLIKELY(!root_sink))
1963 /* As a special exception we accept mono volumes on all sinks --
1964 * even on those with more complex channel maps */
1967 if (pa_cvolume_compatible(volume, &s->sample_spec))
1968 new_reference_volume = *volume;
/* Mono volume on a multichannel sink: scale the current volume. */
1970 new_reference_volume = s->reference_volume;
1971 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
1974 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
1976 if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
1977 if (pa_sink_flat_volume_enabled(root_sink)) {
1978 /* OK, propagate this volume change back to the inputs */
1979 propagate_reference_volume(root_sink);
1981 /* And now recalculate the real volume */
1982 compute_real_volume(root_sink);
/* Non-flat mode: real volume simply follows the reference volume. */
1984 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
1988 /* If volume is NULL we synchronize the sink's real and
1989 * reference volumes with the stream volumes. */
1991 pa_assert(pa_sink_flat_volume_enabled(root_sink));
1993 /* Ok, let's determine the new real volume */
1994 compute_real_volume(root_sink);
1996 /* Let's 'push' the reference volume if necessary */
1997 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
1998 /* If the sink and it's root don't have the same number of channels, we need to remap */
1999 if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2000 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2001 update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2003 /* Now that the reference volume is updated, we can update the streams'
2004 * reference ratios. */
2005 compute_reference_ratios(root_sink);
2008 if (root_sink->set_volume) {
2009 /* If we have a function set_volume(), then we do not apply a
2010 * soft volume by default. However, set_volume() is free to
2011 * apply one to root_sink->soft_volume */
2013 pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
2014 if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2015 root_sink->set_volume(root_sink);
2018 /* If we have no function set_volume(), then the soft volume
2019 * becomes the real volume */
2020 root_sink->soft_volume = root_sink->real_volume;
2022 /* This tells the sink that soft volume and/or real volume changed */
2024 pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2027 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2028 * Only to be called by sink implementor */
/* Set the sink's soft (software) volume; NULL resets it to 0 dB.
 * For non-deferred-volume sinks the new value is pushed to the IO
 * thread synchronously. */
2029 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2031 pa_sink_assert_ref(s);
2032 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* Deferred volume is managed from the IO thread; otherwise we must be
 * in the control (main) thread. */
2034 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2035 pa_sink_assert_io_context(s);
2037 pa_assert_ctl_context();
2040 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2042 s->soft_volume = *volume;
2044 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2045 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
/* Not yet linked (or deferred): update the thread copy directly. */
2047 s->thread_info.soft_volume = s->soft_volume;
2050 /* Called from the main thread. Only called for the root sink in volume sharing
2051 * cases, except for internal recursive calls. */
/* React to an externally-caused hardware volume change: adopt the new
 * real volume as the reference volume and rebuild the stream volumes
 * from the (unchanged) ratios, notifying listeners. */
2052 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2056 pa_sink_assert_ref(s);
2057 pa_assert(old_real_volume);
2058 pa_assert_ctl_context();
2059 pa_assert(PA_SINK_IS_LINKED(s->state));
2061 /* This is called when the hardware's real volume changes due to
2062 * some external event. We copy the real volume into our
2063 * reference volume and then rebuild the stream volumes based on
2064 * i->real_ratio which should stay fixed. */
2066 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* Nothing changed -> nothing to propagate. */
2067 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2070 /* 1. Make the real volume the reference volume */
2071 update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
2074 if (pa_sink_flat_volume_enabled(s)) {
2076 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2077 pa_cvolume old_volume = i->volume;
2079 /* 2. Since the sink's reference and real volumes are equal
2080 * now our ratios should be too. */
2081 i->reference_ratio = i->real_ratio;
2083 /* 3. Recalculate the new stream reference volume based on the
2084 * reference ratio and the sink's reference volume.
2086 * This basically calculates:
2088 * i->volume = s->reference_volume * i->reference_ratio
2090 * This is identical to propagate_reference_volume() */
2091 i->volume = s->reference_volume;
2092 pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
2093 pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);
2095 /* Notify if something changed */
2096 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
2098 if (i->volume_changed)
2099 i->volume_changed(i);
2101 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
/* Recurse into volume-sharing filter sinks below this input. */
2104 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2105 propagate_real_volume(i->origin_sink, old_real_volume);
2109 /* Something got changed in the hardware. It probably makes sense
2110 * to save changed hw settings given that hw volume changes not
2111 * triggered by PA are almost certainly done by the user. */
2112 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2113 s->save_volume = TRUE;
2116 /* Called from io thread */
/* Ask the main thread (asynchronously, via the outbound message queue)
 * to refresh this sink's volume and mute state. */
2117 void pa_sink_update_volume_and_mute(pa_sink *s) {
2119 pa_sink_assert_io_context(s);
2121 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2124 /* Called from main thread */
/* Return the sink's reference volume, optionally refreshing the real
 * volume from the hardware first (get_volume() callback or a
 * synchronous GET_VOLUME message for deferred-volume sinks). */
2125 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
2126 pa_sink_assert_ref(s);
2127 pa_assert_ctl_context();
2128 pa_assert(PA_SINK_IS_LINKED(s->state));
2130 if (s->refresh_volume || force_refresh) {
2131 struct pa_cvolume old_real_volume;
2133 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2135 old_real_volume = s->real_volume;
2137 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
/* Deferred volume: fetch the value from the IO thread instead. */
2140 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2142 update_real_volume(s, &s->real_volume, &s->channel_map);
2143 propagate_real_volume(s, &old_real_volume);
2146 return &s->reference_volume;
2149 /* Called from main thread. In volume sharing cases, only the root sink may
/* For sink implementors: report an externally-observed real-volume
 * change so reference/stream volumes get updated and events fired. */
2151 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2152 pa_cvolume old_real_volume;
2154 pa_sink_assert_ref(s);
2155 pa_assert_ctl_context();
2156 pa_assert(PA_SINK_IS_LINKED(s->state));
2157 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2159 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2161 old_real_volume = s->real_volume;
2162 update_real_volume(s, new_real_volume, &s->channel_map);
2163 propagate_real_volume(s, &old_real_volume);
2166 /* Called from main thread */
/* Set the sink's mute state, invoke the backend's set_mute() for
 * non-deferred sinks, sync the IO thread, and fire a change event if
 * the state actually changed. */
2167 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
2168 pa_bool_t old_muted;
2170 pa_sink_assert_ref(s);
2171 pa_assert_ctl_context();
2172 pa_assert(PA_SINK_IS_LINKED(s->state));
2174 old_muted = s->muted;
/* Keep the save flag sticky across no-op changes. */
2176 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
2178 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute)
2181 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2183 if (old_muted != s->muted)
2184 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2187 /* Called from main thread */
/* Return the sink's mute state, optionally refreshing it from the
 * hardware first. An externally-changed state is treated as a user
 * action: it is saved and a change event is fired. */
2188 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
2190 pa_sink_assert_ref(s);
2191 pa_assert_ctl_context();
2192 pa_assert(PA_SINK_IS_LINKED(s->state));
2194 if (s->refresh_muted || force_refresh) {
2195 pa_bool_t old_muted = s->muted;
2197 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_mute)
/* Deferred volume: fetch the mute state from the IO thread. */
2200 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
2202 if (old_muted != s->muted) {
2203 s->save_muted = TRUE;
2205 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2207 /* Make sure the soft mute status stays in sync */
2208 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2215 /* Called from main thread */
/* For sink implementors: report an externally-observed mute change;
 * saves the new state and fires a change event (no-op if unchanged). */
2216 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
2217 pa_sink_assert_ref(s);
2218 pa_assert_ctl_context();
2219 pa_assert(PA_SINK_IS_LINKED(s->state));
2221 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2223 if (s->muted == new_muted)
2226 s->muted = new_muted;
2227 s->save_muted = TRUE;
2229 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2232 /* Called from main thread */
/* Merge 'p' into the sink's property list using update mode 'mode',
 * then (if the sink is linked) fire the proplist-changed hook and a
 * subscription event. */
2233 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2234 pa_sink_assert_ref(s);
2235 pa_assert_ctl_context();
2238 pa_proplist_update(s->proplist, mode, p);
2240 if (PA_SINK_IS_LINKED(s->state)) {
2241 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2242 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2248 /* Called from main thread */
2249 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Set (or clear, with NULL) the human-readable description property,
 * keep the monitor source's description in sync, and fire change
 * notifications when linked. */
2250 void pa_sink_set_description(pa_sink *s, const char *description) {
2252 pa_sink_assert_ref(s);
2253 pa_assert_ctl_context();
/* NULL on an already-unset property: nothing to do. */
2255 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2258 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2260 if (old && description && pa_streq(old, description))
2264 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2266 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2268 if (s->monitor_source) {
2271 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2272 pa_source_set_description(s->monitor_source, n);
2276 if (PA_SINK_IS_LINKED(s->state)) {
2277 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2278 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2282 /* Called from main thread */
/* Number of streams attached to this sink, including streams on its
 * monitor source (contrast with pa_sink_used_by()). */
2283 unsigned pa_sink_linked_by(pa_sink *s) {
2286 pa_sink_assert_ref(s);
2287 pa_assert_ctl_context();
2288 pa_assert(PA_SINK_IS_LINKED(s->state));
2290 ret = pa_idxset_size(s->inputs);
2292 /* We add in the number of streams connected to us here. Please
2293 * note the asymmetry to pa_sink_used_by()! */
2295 if (s->monitor_source)
2296 ret += pa_source_linked_by(s->monitor_source);
2301 /* Called from main thread */
/* Return the number of actively playing (non-corked) sink inputs. Streams on
 * the monitor source are intentionally not counted -- see the comment below
 * and the asymmetry note in pa_sink_linked_by(). */
2302 unsigned pa_sink_used_by(pa_sink *s) {
2305 pa_sink_assert_ref(s);
2306 pa_assert_ctl_context();
2307 pa_assert(PA_SINK_IS_LINKED(s->state));
2309 ret = pa_idxset_size(s->inputs);
/* Corked inputs are a subset of all inputs, hence this must hold. */
2310 pa_assert(ret >= s->n_corked);
2312 /* Streams connected to our monitor source do not matter for
2313 * pa_sink_used_by()!.*/
2315 return ret - s->n_corked;
2318 /* Called from main thread */
/* Count the streams that inhibit auto-suspend of this sink: linked,
 * non-corked sink inputs without DONT_INHIBIT_AUTO_SUSPEND, plus whatever
 * the monitor source reports. A result of 0 means the sink may be suspended.
 * NOTE(review): lossy extraction -- the `ret`/`i`/`idx` declarations, the
 * early `return 0`, the `continue` statements inside the loop, the `ret++`
 * accumulation and the final return are missing from this excerpt. */
2319 unsigned pa_sink_check_suspend(pa_sink *s) {
2324 pa_sink_assert_ref(s);
2325 pa_assert_ctl_context();
/* An unlinked sink trivially has no inhibiting streams. */
2327 if (!PA_SINK_IS_LINKED(s->state))
2332 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2333 pa_sink_input_state_t st;
2335 st = pa_sink_input_get_state(i);
2337 /* We do not assert here. It is perfectly valid for a sink input to
2338 * be in the INIT state (i.e. created, marked done but not yet put)
2339 * and we should not care if it's unlinked as it won't contribute
2340 * towards our busy status.
2342 if (!PA_SINK_INPUT_IS_LINKED(st))
2345 if (st == PA_SINK_INPUT_CORKED)
2348 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2354 if (s->monitor_source)
2355 ret += pa_source_check_suspend(s->monitor_source);
2360 /* Called from the IO thread */
/* Copy each sink input's main-thread soft_volume into its thread_info copy
 * and request a full rewind for inputs whose volume actually changed, so the
 * new volume takes effect on already-rendered audio. */
2361 static void sync_input_volumes_within_thread(pa_sink *s) {
2365 pa_sink_assert_ref(s);
2366 pa_sink_assert_io_context(s);
2368 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
/* Skip inputs that are already in sync (a `continue` was dropped
* from this excerpt by the lossy extraction). */
2369 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2372 i->thread_info.soft_volume = i->soft_volume;
2373 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
2377 /* Called from the IO thread. Only called for the root sink in volume sharing
2378 * cases, except for internal recursive calls. */
/* Apply the synchronized volume on this sink (via SET_VOLUME_SYNCED) and then
 * recurse into every filter sink that shares volume with its master, so the
 * whole volume-sharing tree ends up consistent. */
2379 static void set_shared_volume_within_thread(pa_sink *s) {
2380 pa_sink_input *i = NULL;
2383 pa_sink_assert_ref(s);
/* Synchronous in-thread dispatch: not posted through the asyncmsgq. */
2385 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2387 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2388 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2389 set_shared_volume_within_thread(i->origin_sink);
2393 /* Called from IO thread, except when it is not */
/* Default message handler for the sink's msgobject. Handles stream
 * add/remove, stream moves, volume/mute synchronization, state changes,
 * latency queries/configuration and port switching. Sink implementations
 * usually chain up to this from their own process_msg().
 * NOTE(review): this excerpt is lossily extracted -- many short lines
 * (`return 0;`, `break;`, closing braces, local declarations) are missing.
 * Only comments were added; no code was touched. */
2394 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2395 pa_sink *s = PA_SINK(o);
2396 pa_sink_assert_ref(s);
2398 switch ((pa_sink_message_t) code) {
2400 case PA_SINK_MESSAGE_ADD_INPUT: {
2401 pa_sink_input *i = PA_SINK_INPUT(userdata);
2403 /* If you change anything here, make sure to change the
2404 * sink input handling a few lines down at
2405 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
/* The hashmap takes its own reference on the input. */
2407 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2409 /* Since the caller sleeps in pa_sink_input_put(), we can
2410 * safely access data outside of thread_info even though
/* Mirror the main-thread sync_prev/sync_next links into thread_info. */
2413 if ((i->thread_info.sync_prev = i->sync_prev)) {
2414 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2415 pa_assert(i->sync_prev->sync_next == i);
2416 i->thread_info.sync_prev->thread_info.sync_next = i;
2419 if ((i->thread_info.sync_next = i->sync_next)) {
2420 pa_assert(i->sink == i->thread_info.sync_next->sink);
2421 pa_assert(i->sync_next->sync_prev == i);
2422 i->thread_info.sync_next->thread_info.sync_prev = i;
2425 pa_assert(!i->thread_info.attached);
2426 i->thread_info.attached = TRUE;
2431 pa_sink_input_set_state_within_thread(i, i->state);
2433 /* The requested latency of the sink input needs to be fixed up and
2434 * then configured on the sink. If this causes the sink latency to
2435 * go down, the sink implementor is responsible for doing a rewind
2436 * in the update_requested_latency() callback to ensure that the
2437 * sink buffer doesn't contain more data than what the new latency
2440 * XXX: Does it really make sense to push this responsibility to
2441 * the sink implementors? Wouldn't it be better to do it once in
2442 * the core than many times in the modules? */
2444 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2445 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2447 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2448 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2450 /* We don't rewind here automatically. This is left to the
2451 * sink input implementor because some sink inputs need a
2452 * slow start, i.e. need some time to buffer client
2453 * samples before beginning streaming.
2455 * XXX: Does it really make sense to push this functionality to
2456 * the sink implementors? Wouldn't it be better to do it once in
2457 * the core than many times in the modules? */
2459 /* In flat volume mode we need to update the volume as
/* Recurses into this same handler via SET_SHARED_VOLUME. */
2461 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2464 case PA_SINK_MESSAGE_REMOVE_INPUT: {
2465 pa_sink_input *i = PA_SINK_INPUT(userdata);
2467 /* If you change anything here, make sure to change the
2468 * sink input handling a few lines down at
2469 * PA_SINK_MESSAGE_START_MOVE, too. */
2474 pa_sink_input_set_state_within_thread(i, i->state);
2476 pa_assert(i->thread_info.attached);
2477 i->thread_info.attached = FALSE;
2479 /* Since the caller sleeps in pa_sink_input_unlink(),
2480 * we can safely access data outside of thread_info even
2481 * though it is mutable */
/* Main-thread sync links must already be severed at this point. */
2483 pa_assert(!i->sync_prev);
2484 pa_assert(!i->sync_next);
2486 if (i->thread_info.sync_prev) {
2487 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2488 i->thread_info.sync_prev = NULL;
2491 if (i->thread_info.sync_next) {
2492 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2493 i->thread_info.sync_next = NULL;
/* Drop the reference the hashmap held since ADD_INPUT. */
2496 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
2497 pa_sink_input_unref(i);
2499 pa_sink_invalidate_requested_latency(s, TRUE);
2500 pa_sink_request_rewind(s, (size_t) -1);
2502 /* In flat volume mode we need to update the volume as
2504 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2507 case PA_SINK_MESSAGE_START_MOVE: {
2508 pa_sink_input *i = PA_SINK_INPUT(userdata);
2510 /* We don't support moving synchronized streams. */
2511 pa_assert(!i->sync_prev);
2512 pa_assert(!i->sync_next);
2513 pa_assert(!i->thread_info.sync_next);
2514 pa_assert(!i->thread_info.sync_prev);
2516 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2518 size_t sink_nbytes, total_nbytes;
2520 /* The old sink probably has some audio from this
2521 * stream in its buffer. We want to "take it back" as
2522 * much as possible and play it to the new sink. We
2523 * don't know at this point how much the old sink can
2524 * rewind. We have to pick something, and that
2525 * something is the full latency of the old sink here.
2526 * So we rewind the stream buffer by the sink latency
2527 * amount, which may be more than what we should
2528 * rewind. This can result in a chunk of audio being
2529 * played both to the old sink and the new sink.
2531 * FIXME: Fix this code so that we don't have to make
2532 * guesses about how much the sink will actually be
2533 * able to rewind. If someone comes up with a solution
2534 * for this, something to note is that the part of the
2535 * latency that the old sink couldn't rewind should
2536 * ideally be compensated after the stream has moved
2537 * to the new sink by adding silence. The new sink
2538 * most likely can't start playing the moved stream
2539 * immediately, and that gap should be removed from
2540 * the "compensation silence" (at least at the time of
2541 * writing this, the move finish code will actually
2542 * already take care of dropping the new sink's
2543 * unrewindable latency, so taking into account the
2544 * unrewindable latency of the old sink is the only
2547 * The render_memblockq contents are discarded,
2548 * because when the sink changes, the format of the
2549 * audio stored in the render_memblockq may change
2550 * too, making the stored audio invalid. FIXME:
2551 * However, the read and write indices are moved back
2552 * the same amount, so if they are not the same now,
2553 * they won't be the same after the rewind either. If
2554 * the write index of the render_memblockq is ahead of
2555 * the read index, then the render_memblockq will feed
2556 * the new sink some silence first, which it shouldn't
2557 * do. The write index should be flushed to be the
2558 * same as the read index. */
2560 /* Get the latency of the sink */
2561 usec = pa_sink_get_latency_within_thread(s);
2562 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2563 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
2565 if (total_nbytes > 0) {
/* Translate to pre-resampler bytes when a resampler is in use. */
2566 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
2567 i->thread_info.rewrite_flush = TRUE;
2568 pa_sink_input_process_rewind(i, sink_nbytes);
2575 pa_assert(i->thread_info.attached);
2576 i->thread_info.attached = FALSE;
2578 /* Let's remove the sink input ...*/
2579 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
2580 pa_sink_input_unref(i);
2582 pa_sink_invalidate_requested_latency(s, TRUE);
2584 pa_log_debug("Requesting rewind due to started move");
2585 pa_sink_request_rewind(s, (size_t) -1);
2587 /* In flat volume mode we need to update the volume as
2589 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2592 case PA_SINK_MESSAGE_FINISH_MOVE: {
2593 pa_sink_input *i = PA_SINK_INPUT(userdata);
2595 /* We don't support moving synchronized streams. */
2596 pa_assert(!i->sync_prev);
2597 pa_assert(!i->sync_next);
2598 pa_assert(!i->thread_info.sync_next);
2599 pa_assert(!i->thread_info.sync_prev);
2601 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2603 pa_assert(!i->thread_info.attached);
2604 i->thread_info.attached = TRUE;
2609 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2613 /* In the ideal case the new sink would start playing
2614 * the stream immediately. That requires the sink to
2615 * be able to rewind all of its latency, which usually
2616 * isn't possible, so there will probably be some gap
2617 * before the moved stream becomes audible. We then
2618 * have two possibilities: 1) start playing the stream
2619 * from where it is now, or 2) drop the unrewindable
2620 * latency of the sink from the stream. With option 1
2621 * we won't lose any audio but the stream will have a
2622 * pause. With option 2 we may lose some audio but the
2623 * stream time will be somewhat in sync with the wall
2624 * clock. Lennart seems to have chosen option 2 (one
2625 * of the reasons might have been that option 1 is
2626 * actually much harder to implement), so we drop the
2627 * latency of the new sink from the moved stream and
2628 * hope that the sink will undo most of that in the
2631 /* Get the latency of the sink */
2632 usec = pa_sink_get_latency_within_thread(s);
2633 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2636 pa_sink_input_drop(i, nbytes);
2638 pa_log_debug("Requesting rewind due to finished move");
2639 pa_sink_request_rewind(s, nbytes);
2642 /* Updating the requested sink latency has to be done
2643 * after the sink rewind request, not before, because
2644 * otherwise the sink may limit the rewind amount
2647 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2648 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2650 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2651 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2653 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2656 case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
/* Volume sharing: always act on the root of the filter-sink chain. */
2657 pa_sink *root_sink = pa_sink_get_master(s);
2659 if (PA_LIKELY(root_sink))
2660 set_shared_volume_within_thread(root_sink);
2665 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2667 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
/* Queue the HW volume change for deferred application. */
2669 pa_sink_volume_change_push(s);
2671 /* Fall through ... */
2673 case PA_SINK_MESSAGE_SET_VOLUME:
2675 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2676 s->thread_info.soft_volume = s->soft_volume;
2677 pa_sink_request_rewind(s, (size_t) -1);
2680 /* Fall through ... */
2682 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2683 sync_input_volumes_within_thread(s);
2686 case PA_SINK_MESSAGE_GET_VOLUME:
2688 if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
/* Apply any pending deferred changes before reading HW volume. */
2690 pa_sink_volume_change_flush(s);
2691 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2694 /* In case sink implementor reset SW volume. */
2695 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2696 s->thread_info.soft_volume = s->soft_volume;
2697 pa_sink_request_rewind(s, (size_t) -1);
2702 case PA_SINK_MESSAGE_SET_MUTE:
2704 if (s->thread_info.soft_muted != s->muted) {
2705 s->thread_info.soft_muted = s->muted;
2706 pa_sink_request_rewind(s, (size_t) -1);
2709 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2714 case PA_SINK_MESSAGE_GET_MUTE:
2716 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2721 case PA_SINK_MESSAGE_SET_STATE: {
/* True on any transition into or out of SUSPENDED while opened. */
2723 pa_bool_t suspend_change =
2724 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2725 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2727 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2729 if (s->thread_info.state == PA_SINK_SUSPENDED) {
/* A suspended sink cannot rewind; clear any pending request. */
2730 s->thread_info.rewind_nbytes = 0;
2731 s->thread_info.rewind_requested = FALSE;
2734 if (suspend_change) {
2738 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2739 if (i->suspend_within_thread)
2740 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2746 case PA_SINK_MESSAGE_DETACH:
2748 /* Detach all streams */
2749 pa_sink_detach_within_thread(s);
2752 case PA_SINK_MESSAGE_ATTACH:
2754 /* Reattach all streams */
2755 pa_sink_attach_within_thread(s);
2758 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2760 pa_usec_t *usec = userdata;
2761 *usec = pa_sink_get_requested_latency_within_thread(s);
2763 /* Yes, that's right, the IO thread will see -1 when no
2764 * explicit requested latency is configured, the main
2765 * thread will see max_latency */
2766 if (*usec == (pa_usec_t) -1)
2767 *usec = s->thread_info.max_latency;
2772 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2773 pa_usec_t *r = userdata;
2775 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2780 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2781 pa_usec_t *r = userdata;
2783 r[0] = s->thread_info.min_latency;
2784 r[1] = s->thread_info.max_latency;
2789 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2791 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2794 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2796 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2799 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2801 *((size_t*) userdata) = s->thread_info.max_rewind;
2804 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2806 *((size_t*) userdata) = s->thread_info.max_request;
2809 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2811 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2814 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2816 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2819 case PA_SINK_MESSAGE_SET_PORT:
2821 pa_assert(userdata);
2823 struct sink_message_set_port *msg_data = userdata;
/* Run the port switch in the IO thread; result travels back in msg_data->ret. */
2824 msg_data->ret = s->set_port(s, msg_data->port);
2828 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2829 /* This message is sent from IO-thread and handled in main thread. */
2830 pa_assert_ctl_context();
2832 /* Make sure we're not messing with main thread when no longer linked */
2833 if (!PA_SINK_IS_LINKED(s->state))
2836 pa_sink_get_volume(s, TRUE);
2837 pa_sink_get_mute(s, TRUE);
2840 case PA_SINK_MESSAGE_SET_LATENCY_OFFSET:
2841 s->thread_info.latency_offset = offset;
2844 case PA_SINK_MESSAGE_GET_LATENCY:
2845 case PA_SINK_MESSAGE_MAX:
2852 /* Called from main thread */
/* Suspend or resume every sink in the core for the given cause. The loop
 * keeps going over all sinks even after a failure; presumably the first
 * error code is what gets returned -- the accumulation lines were dropped
 * by the lossy extraction, so this cannot be confirmed here. */
2853 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2858 pa_core_assert_ref(c);
2859 pa_assert_ctl_context();
2860 pa_assert(cause != 0);
2862 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2865 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2872 /* Called from main thread */
/* Synchronously ask the IO thread to detach all streams from their sink
 * (blocks until PA_SINK_MESSAGE_DETACH has been processed). */
2873 void pa_sink_detach(pa_sink *s) {
2874 pa_sink_assert_ref(s);
2875 pa_assert_ctl_context();
2876 pa_assert(PA_SINK_IS_LINKED(s->state));
2878 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2881 /* Called from main thread */
/* Synchronously ask the IO thread to reattach all streams (counterpart of
 * pa_sink_detach() above). */
2882 void pa_sink_attach(pa_sink *s) {
2883 pa_sink_assert_ref(s);
2884 pa_assert_ctl_context();
2885 pa_assert(PA_SINK_IS_LINKED(s->state));
2887 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2890 /* Called from IO thread */
/* Detach every sink input and the monitor source. The per-input call inside
 * the FOREACH body was dropped by the lossy extraction (presumably
 * pa_sink_input_detach(i) -- confirm against upstream). */
2891 void pa_sink_detach_within_thread(pa_sink *s) {
2895 pa_sink_assert_ref(s);
2896 pa_sink_assert_io_context(s);
2897 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2899 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2903 if (s->monitor_source)
2904 pa_source_detach_within_thread(s->monitor_source);
2907 /* Called from IO thread */
/* Reattach every sink input and the monitor source. As above, the per-input
 * call in the FOREACH body is missing from this excerpt (presumably
 * pa_sink_input_attach(i) -- confirm against upstream). */
2908 void pa_sink_attach_within_thread(pa_sink *s) {
2912 pa_sink_assert_ref(s);
2913 pa_sink_assert_io_context(s);
2914 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2916 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2920 if (s->monitor_source)
2921 pa_source_attach_within_thread(s->monitor_source);
2924 /* Called from IO thread */
/* Record a rewind request of `nbytes` ((size_t)-1 means "as much as
 * possible") and notify the implementation via its request_rewind()
 * callback. Requests are clamped to max_rewind and only ever grow: a
 * smaller request while one is already pending is ignored. */
2925 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2926 pa_sink_assert_ref(s);
2927 pa_sink_assert_io_context(s);
2928 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2930 if (nbytes == (size_t) -1)
2931 nbytes = s->thread_info.max_rewind;
2933 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
/* An equal-or-larger rewind is already pending; nothing to do
 * (the `return` was dropped from this excerpt). */
2935 if (s->thread_info.rewind_requested &&
2936 nbytes <= s->thread_info.rewind_nbytes)
2939 s->thread_info.rewind_nbytes = nbytes;
2940 s->thread_info.rewind_requested = TRUE;
2942 if (s->request_rewind)
2943 s->request_rewind(s);
2946 /* Called from IO thread */
/* Compute the effective requested latency: the minimum over all sink inputs'
 * requested latencies and the monitor source's, clamped to the sink's
 * min/max range; (pa_usec_t)-1 when nothing requested anything. For sinks
 * without DYNAMIC_LATENCY the clamped fixed_latency is returned instead.
 * The result is cached once the sink is linked. */
2947 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2948 pa_usec_t result = (pa_usec_t) -1;
2951 pa_usec_t monitor_latency;
2953 pa_sink_assert_ref(s);
2954 pa_sink_assert_io_context(s);
2956 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2957 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
/* Fast path: reuse the cached value when still valid. */
2959 if (s->thread_info.requested_latency_valid)
2960 return s->thread_info.requested_latency;
/* Take the smallest latency requested by any input. */
2962 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2963 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2964 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2965 result = i->thread_info.requested_sink_latency;
2967 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2969 if (monitor_latency != (pa_usec_t) -1 &&
2970 (result == (pa_usec_t) -1 || result > monitor_latency))
2971 result = monitor_latency;
2973 if (result != (pa_usec_t) -1)
2974 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2976 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2977 /* Only cache if properly initialized */
2978 s->thread_info.requested_latency = result;
2979 s->thread_info.requested_latency_valid = TRUE;
2985 /* Called from main thread */
/* Main-thread wrapper: query the requested latency from the IO thread via a
 * synchronous message. The suspended-sink early return and the final
 * `return usec;` were dropped by the lossy extraction. */
2986 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2989 pa_sink_assert_ref(s);
2990 pa_assert_ctl_context();
2991 pa_assert(PA_SINK_IS_LINKED(s->state));
/* While suspended there is no IO thread to ask. */
2993 if (s->state == PA_SINK_SUSPENDED)
2996 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3001 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Update thread_info.max_rewind and propagate the new value to every sink
 * input (when linked) and to the monitor source. No-op if unchanged. */
3002 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3006 pa_sink_assert_ref(s);
3007 pa_sink_assert_io_context(s);
3009 if (max_rewind == s->thread_info.max_rewind)
3012 s->thread_info.max_rewind = max_rewind;
3014 if (PA_SINK_IS_LINKED(s->thread_info.state))
3015 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3016 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3018 if (s->monitor_source)
3019 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3022 /* Called from main thread */
/* Set max_rewind: routed through the IO thread when the sink is linked,
 * applied directly otherwise (IO thread not running yet). */
3023 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3024 pa_sink_assert_ref(s);
3025 pa_assert_ctl_context();
3027 if (PA_SINK_IS_LINKED(s->state))
3028 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3030 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3033 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Update thread_info.max_request and, when linked, propagate it to every
 * sink input. Unlike max_rewind, the monitor source is not involved. */
3034 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3037 pa_sink_assert_ref(s);
3038 pa_sink_assert_io_context(s);
3040 if (max_request == s->thread_info.max_request)
3043 s->thread_info.max_request = max_request;
3045 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3048 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3049 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3053 /* Called from main thread */
/* Set max_request: routed through the IO thread when linked, applied
 * directly otherwise (mirrors pa_sink_set_max_rewind()). */
3054 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3055 pa_sink_assert_ref(s);
3056 pa_assert_ctl_context();
3058 if (PA_SINK_IS_LINKED(s->state))
3059 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3061 pa_sink_set_max_request_within_thread(s, max_request);
3064 /* Called from IO thread */
/* Invalidate the cached requested latency and notify the sink
 * implementation plus all inputs so they can recompute. The `dynamic`
 * parameter gates part of the logic; an early-return line between the
 * cache invalidation and the linked check was dropped by the lossy
 * extraction, so its exact effect cannot be confirmed from this excerpt. */
3065 void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
3069 pa_sink_assert_ref(s);
3070 pa_sink_assert_io_context(s);
3072 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3073 s->thread_info.requested_latency_valid = FALSE;
3077 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3079 if (s->update_requested_latency)
3080 s->update_requested_latency(s);
3082 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3083 if (i->update_sink_requested_latency)
3084 i->update_sink_requested_latency(i);
3088 /* Called from main thread */
/* Configure the dynamic latency range. Zero means "no limit" and is
 * normalized to the ABSOLUTE_* bounds; non-default ranges are only legal for
 * sinks with PA_SINK_DYNAMIC_LATENCY. Routed through the IO thread when the
 * sink is linked, applied directly otherwise. */
3089 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3090 pa_sink_assert_ref(s);
3091 pa_assert_ctl_context();
3093 /* min_latency == 0: no limit
3094 * min_latency anything else: specified limit
3096 * Similar for max_latency */
3098 if (min_latency < ABSOLUTE_MIN_LATENCY)
3099 min_latency = ABSOLUTE_MIN_LATENCY;
3101 if (max_latency <= 0 ||
3102 max_latency > ABSOLUTE_MAX_LATENCY)
3103 max_latency = ABSOLUTE_MAX_LATENCY;
3105 pa_assert(min_latency <= max_latency);
3107 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3108 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3109 max_latency == ABSOLUTE_MAX_LATENCY) ||
3110 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3112 if (PA_SINK_IS_LINKED(s->state)) {
/* The declaration of the two-element `r` array carrying the range
 * was dropped from this excerpt by the lossy extraction. */
3118 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3120 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3123 /* Called from main thread */
/* Read the current latency range: via a synchronous IO-thread message when
 * linked, straight from thread_info otherwise. */
3124 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3125 pa_sink_assert_ref(s);
3126 pa_assert_ctl_context();
3127 pa_assert(min_latency);
3128 pa_assert(max_latency);
3130 if (PA_SINK_IS_LINKED(s->state)) {
3131 pa_usec_t r[2] = { 0, 0 };
3133 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3135 *min_latency = r[0];
3136 *max_latency = r[1];
/* else-branch: IO thread not running, thread_info is safe to read
 * directly (the `} else {` line was dropped from this excerpt). */
3138 *min_latency = s->thread_info.min_latency;
3139 *max_latency = s->thread_info.max_latency;
3143 /* Called from IO thread */
/* Apply a new latency range in the IO thread: store it, notify all inputs
 * via update_sink_latency_range(), invalidate the cached requested latency
 * and forward the range to the monitor source. No-op if unchanged. */
3144 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3145 pa_sink_assert_ref(s);
3146 pa_sink_assert_io_context(s);
3148 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3149 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3150 pa_assert(min_latency <= max_latency);
3152 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3153 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3154 max_latency == ABSOLUTE_MAX_LATENCY) ||
3155 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3157 if (s->thread_info.min_latency == min_latency &&
3158 s->thread_info.max_latency == max_latency)
3161 s->thread_info.min_latency = min_latency;
3162 s->thread_info.max_latency = max_latency;
3164 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3168 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3169 if (i->update_sink_latency_range)
3170 i->update_sink_latency_range(i);
3173 pa_sink_invalidate_requested_latency(s, FALSE);
3175 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3178 /* Called from main thread */
/* Configure the fixed latency for non-dynamic-latency sinks, clamped to the
 * ABSOLUTE_* bounds. Dynamic-latency sinks must pass 0 (asserted). The
 * value is mirrored to the monitor source. */
3179 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3180 pa_sink_assert_ref(s);
3181 pa_assert_ctl_context();
3183 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3184 pa_assert(latency == 0);
3188 if (latency < ABSOLUTE_MIN_LATENCY)
3189 latency = ABSOLUTE_MIN_LATENCY;
3191 if (latency > ABSOLUTE_MAX_LATENCY)
3192 latency = ABSOLUTE_MAX_LATENCY;
3194 if (PA_SINK_IS_LINKED(s->state))
3195 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
/* Not linked yet: IO thread not running, write thread_info directly
 * (the `else` keyword was dropped from this excerpt). */
3197 s->thread_info.fixed_latency = latency;
3199 pa_source_set_fixed_latency(s->monitor_source, latency);
3202 /* Called from main thread */
/* Read the fixed latency: 0 for dynamic-latency sinks; otherwise fetched
 * from the IO thread when linked, or from thread_info directly when not.
 * The `latency` declaration, early `return 0`, `else` and final return were
 * dropped from this excerpt by the lossy extraction. */
3203 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3206 pa_sink_assert_ref(s);
3207 pa_assert_ctl_context();
3209 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3212 if (PA_SINK_IS_LINKED(s->state))
3213 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3215 latency = s->thread_info.fixed_latency;
3220 /* Called from IO thread */
/* Apply a new fixed latency in the IO thread: store it, notify inputs via
 * update_sink_fixed_latency(), invalidate the cached requested latency and
 * forward to the monitor source. Dynamic-latency sinks must pass 0. */
3221 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3222 pa_sink_assert_ref(s);
3223 pa_sink_assert_io_context(s);
3225 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3226 pa_assert(latency == 0);
3230 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3231 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3233 if (s->thread_info.fixed_latency == latency)
3236 s->thread_info.fixed_latency = latency;
3238 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3242 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3243 if (i->update_sink_fixed_latency)
3244 i->update_sink_fixed_latency(i);
3247 pa_sink_invalidate_requested_latency(s, FALSE);
3249 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3252 /* Called from main context */
/* Store the latency offset on the main-thread side and mirror it into
 * thread_info -- via a message when linked, directly otherwise. */
3253 void pa_sink_set_latency_offset(pa_sink *s, int64_t offset) {
3254 pa_sink_assert_ref(s);
3256 s->latency_offset = offset;
3258 if (PA_SINK_IS_LINKED(s->state))
3259 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3261 s->thread_info.latency_offset = offset;
3264 /* Called from main context */
/* Read max_rewind: from thread_info directly when not linked, otherwise via
 * a synchronous IO-thread message into local `r` (its declaration and the
 * final `return r;` were dropped from this excerpt). */
3265 size_t pa_sink_get_max_rewind(pa_sink *s) {
3267 pa_assert_ctl_context();
3268 pa_sink_assert_ref(s);
3270 if (!PA_SINK_IS_LINKED(s->state))
3271 return s->thread_info.max_rewind;
3273 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3278 /* Called from main context */
/* Read max_request: same pattern as pa_sink_get_max_rewind() above (local
 * `r` declaration and final return dropped from this excerpt). */
3279 size_t pa_sink_get_max_request(pa_sink *s) {
3281 pa_sink_assert_ref(s);
3282 pa_assert_ctl_context();
3284 if (!PA_SINK_IS_LINKED(s->state))
3285 return s->thread_info.max_request;
3287 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3292 /* Called from main context */
/* Switch the sink's active port by name. Fails with NOTIMPLEMENTED when the
 * sink has no set_port() callback, NOENTITY when the port name is unknown or
 * the callback fails. For DEFERRED_VOLUME sinks the switch runs in the IO
 * thread via PA_SINK_MESSAGE_SET_PORT; otherwise set_port() is called
 * directly. On success updates active_port/save_port, pushes the port's
 * latency offset and notifies subscribers and the PORT_CHANGED hook. */
3293 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
3294 pa_device_port *port;
3297 pa_sink_assert_ref(s);
3298 pa_assert_ctl_context();
/* The `if (!s->set_port) {` guard line was dropped from this excerpt. */
3301 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3302 return -PA_ERR_NOTIMPLEMENTED;
/* Guard for missing `name`/`s->ports` dropped from this excerpt. */
3306 return -PA_ERR_NOENTITY;
3308 if (!(port = pa_hashmap_get(s->ports, name)))
3309 return -PA_ERR_NOENTITY;
/* Already active: just upgrade the save flag. */
3311 if (s->active_port == port) {
3312 s->save_port = s->save_port || save;
3316 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
3317 struct sink_message_set_port msg = { .port = port, .ret = 0 };
3318 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
/* else-branch: non-deferred-volume sinks call set_port() directly
 * (the `} else` line was dropped from this excerpt). */
3322 ret = s->set_port(s, port);
3325 return -PA_ERR_NOENTITY;
3327 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3329 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3331 s->active_port = port;
3332 s->save_port = save;
3334 pa_sink_set_latency_offset(s, s->active_port->latency_offset);
3336 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
/* Derive PA_PROP_DEVICE_ICON_NAME from form factor, device class, profile
 * and bus properties, unless an icon name is already set. `is_sink` selects
 * the fallback between output and input icons. Several branch bodies
 * (webcam/computer/handset assignments, the modem branch, the class=="sound"
 * check, the profile suffix strings and the returns) were dropped from this
 * excerpt by the lossy extraction; comments only added here. */
3341 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
3342 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
/* Respect an icon name that is already present. */
3346 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3349 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3351 if (pa_streq(ff, "microphone"))
3352 t = "audio-input-microphone";
3353 else if (pa_streq(ff, "webcam"))
3355 else if (pa_streq(ff, "computer"))
3357 else if (pa_streq(ff, "handset"))
3359 else if (pa_streq(ff, "portable"))
3360 t = "multimedia-player";
3361 else if (pa_streq(ff, "tv"))
3362 t = "video-display";
3365 * The following icons are not part of the icon naming spec,
3366 * because Rodney Dawes sucks as the maintainer of that spec.
3368 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3370 else if (pa_streq(ff, "headset"))
3371 t = "audio-headset";
3372 else if (pa_streq(ff, "headphone"))
3373 t = "audio-headphones";
3374 else if (pa_streq(ff, "speaker"))
3375 t = "audio-speakers";
3376 else if (pa_streq(ff, "hands-free"))
3377 t = "audio-handsfree";
3381 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3382 if (pa_streq(c, "modem"))
/* Fallback when nothing matched: generic card/microphone icon
 * depending on is_sink (the sink branch was dropped here). */
3389 t = "audio-input-microphone";
3392 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
/* `s` gets a suffix per profile family; the assignment lines were
 * dropped from this excerpt. */
3393 if (strstr(profile, "analog"))
3395 else if (strstr(profile, "iec958"))
3397 else if (strstr(profile, "hdmi"))
3401 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3403 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
/* Derive PA_PROP_DEVICE_DESCRIPTION from form factor, class, product name
 * and profile description, unless one is already set. Several short lines
 * (returns, modem branch body, the `if (!d)` fallbacks) were dropped from
 * this excerpt by the lossy extraction. */
3408 pa_bool_t pa_device_init_description(pa_proplist *p) {
3409 const char *s, *d = NULL, *k;
3412 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3415 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3416 if (pa_streq(s, "internal"))
/* Localized fallback name for internal sound hardware. */
3417 d = _("Built-in Audio");
3420 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3421 if (pa_streq(s, "modem"))
3425 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3430 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
/* Append the profile description when available ("<device> <profile>"). */
3433 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3435 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
/* Set PA_PROP_DEVICE_INTENDED_ROLES to "phone" for handset/hands-free/
 * headset form factors, unless roles are already set. The `s` declaration
 * and the TRUE/FALSE returns were dropped from this excerpt. */
3440 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
3444 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3447 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3448 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3449 || pa_streq(s, "headset")) {
3450 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
/* Compute a heuristic priority for a device from its class, form factor,
 * bus and profile properties; higher means preferred. The individual
 * `priority += ...` lines inside each branch were dropped from this excerpt
 * by the lossy extraction, so the exact weights cannot be read here. */
3457 unsigned pa_device_init_priority(pa_proplist *p) {
3459 unsigned priority = 0;
3463 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3465 if (pa_streq(s, "sound"))
3467 else if (!pa_streq(s, "modem"))
3471 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3473 if (pa_streq(s, "internal"))
3475 else if (pa_streq(s, "speaker"))
3477 else if (pa_streq(s, "headphone"))
3481 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3483 if (pa_streq(s, "pci"))
3485 else if (pa_streq(s, "usb"))
3487 else if (pa_streq(s, "bluetooth"))
3491 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3493 if (pa_startswith(s, "analog-"))
3495 else if (pa_startswith(s, "iec958-"))
/* Lock-free free list recycling pa_sink_volume_change structs, so the IO
 * thread can allocate/release them without hitting the heap every time. */
3502 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3504 /* Called from the IO thread. */
/* Allocates (or recycles from the free list) a volume-change entry,
 * initializing its list links and resetting hw_volume to full volume for
 * the sink's channel count.  The return statement is elided from this
 * view. */
3505 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3506     pa_sink_volume_change *c;
/* Prefer the free list; fall back to a fresh allocation. */
3507     if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3508         c = pa_xnew(pa_sink_volume_change, 1);
3510     PA_LLIST_INIT(pa_sink_volume_change, c);
3512     pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3516 /* Called from the IO thread. */
/* Returns a volume-change entry to the static free list; the fallback
 * (presumably pa_xfree when the list is full) is elided from this view. */
3517 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3519     if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3523 /* Called from the IO thread. */
/* Queues a deferred hardware volume change so that it takes effect when the
 * audio currently in the latency pipeline has actually played.  The target
 * HW volume is derived as real_volume / soft_volume; the change is
 * timestamped at now + sink latency + extra delay, then nudged by a safety
 * margin so that increases happen slightly late and decreases slightly
 * early (avoiding audible over-amplification at the transition).
 * NOTE(review): several lines (loop breaks, the "direction" assignments,
 * closing braces) are elided from this view. */
3524 void pa_sink_volume_change_push(pa_sink *s) {
3525     pa_sink_volume_change *c = NULL;
3526     pa_sink_volume_change *nc = NULL;
3527     uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3529     const char *direction = NULL;
3532     nc = pa_sink_volume_change_new(s);
3534     /* NOTE: There is already more different volumes in pa_sink that I can remember.
3535      * Adding one more volume for HW would get us rid of this, but I am trying
3536      * to survive with the ones we already have. */
3537     pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
/* Nothing queued and the HW volume is already at the target: drop the
 * event instead of scheduling a no-op. */
3539     if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3540         pa_log_debug("Volume not changing");
3541         pa_sink_volume_change_free(nc);
/* Schedule at the moment the currently-buffered audio will be heard. */
3545     nc->at = pa_sink_get_latency_within_thread(s);
3546     nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
/* Walk the queue from the tail to find the insertion point, applying the
 * safety margin relative to the neighboring queued change. */
3548     if (s->thread_info.volume_changes_tail) {
3549         for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3550             /* If volume is going up let's do it a bit late. If it is going
3551              * down let's do it a bit early. */
3552             if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3553                 if (nc->at + safety_margin > c->at) {
3554                     nc->at += safety_margin;
3559             else if (nc->at - safety_margin > c->at) {
3560                 nc->at -= safety_margin;
/* Queue was empty: apply the margin relative to the current HW volume. */
3568         if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3569             nc->at += safety_margin;
3572             nc->at -= safety_margin;
/* Link the new change in: either at the head or right after `c`. */
3575         PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3578         PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3581     pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3583     /* We can ignore volume events that came earlier but should happen later than this. */
/* Drop superseded events scheduled after the one we just inserted. */
3584     PA_LLIST_FOREACH(c, nc->next) {
3585         pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3586         pa_sink_volume_change_free(c);
/* The new change is now the last one in the queue. */
3589     s->thread_info.volume_changes_tail = nc;
3592 /* Called from the IO thread. */
/* Discards every pending volume change without applying it, freeing each
 * entry back to the free list.  The loop header iterating `c` over the
 * saved list is elided from this view. */
3593 static void pa_sink_volume_change_flush(pa_sink *s) {
3594     pa_sink_volume_change *c = s->thread_info.volume_changes;
/* Detach the whole queue first, then walk and free the detached entries. */
3596     s->thread_info.volume_changes = NULL;
3597     s->thread_info.volume_changes_tail = NULL;
3599         pa_sink_volume_change *next = c->next;
3600         pa_sink_volume_change_free(c);
3605 /* Called from the IO thread. */
/* Applies every queued volume change whose timestamp is due, writing each
 * one to the hardware (the sink must provide a write_volume callback — see
 * the assert) and recording it as current_hw_volume.  If changes remain
 * queued, *usec_to_next is set to the delay until the next one; the return
 * value (pa_bool_t) presumably reports whether anything was applied —
 * elided from this view, along with the write_volume invocation itself. */
3606 pa_bool_t pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3608     pa_bool_t ret = FALSE;
/* Nothing to do if the queue is empty or the sink is not linked. */
3612     if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3618     pa_assert(s->write_volume);
3620     now = pa_rtclock_now();
/* Pop and apply every change whose scheduled time has arrived. */
3622     while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3623         pa_sink_volume_change *c = s->thread_info.volume_changes;
3624         PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3625         pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3626                      pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3628         s->thread_info.current_hw_volume = c->hw_volume;
3629         pa_sink_volume_change_free(c);
/* Report how long until the next pending change, if any. */
3635     if (s->thread_info.volume_changes) {
3637             *usec_to_next = s->thread_info.volume_changes->at - now;
3638         if (pa_log_ratelimit(PA_LOG_DEBUG))
3639             pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
/* Queue drained: clear the tail pointer too. */
3644         s->thread_info.volume_changes_tail = NULL;
3649 /* Called from the IO thread. */
/* After a rewind of `nbytes`, pulls queued volume changes forward in time:
 * any change scheduled beyond the (margin-adjusted) current latency horizon
 * is clamped back to that horizon, so it is not delayed by audio that was
 * just thrown away.  Ends by applying anything that became due.
 * NOTE(review): `rewound` is computed here but only used in lines elided
 * from this view — confirm against the full source. */
3650 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3651     /* All the queued volume events later than current latency are shifted to happen earlier. */
3652     pa_sink_volume_change *c;
3653     pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3654     pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3655     pa_usec_t limit = pa_sink_get_latency_within_thread(s);
3657     pa_log_debug("latency = %lld", (long long) limit);
3658     limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3660     PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3661         pa_usec_t modified_limit = limit;
/* Apply the same direction-dependent safety margin used when queueing:
 * decreases may land a bit earlier, increases a bit later. */
3662         if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3663             modified_limit -= s->thread_info.volume_change_safety_margin;
3665             modified_limit += s->thread_info.volume_change_safety_margin;
3666         if (c->at > modified_limit) {
3668             if (c->at < modified_limit)
3669                 c->at = modified_limit;
3671         prev_vol = pa_cvolume_avg(&c->hw_volume);
/* Anything now due should take effect immediately. */
3673     pa_sink_volume_change_apply(s, NULL);
3676 /* Called from the main thread */
3677 /* Gets the list of formats supported by the sink. The members and idxset must
3678  * be freed by the caller. */
/* If the sink implements get_formats(), delegate to it; otherwise assume a
 * plain PCM sink and synthesize a one-entry set.  Ownership of the returned
 * idxset and its pa_format_info members transfers to the caller.  The
 * return statement is elided from this view. */
3679 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3684     if (s->get_formats) {
3685         /* Sink supports format query, all is good */
3686         ret = s->get_formats(s);
3688         /* Sink doesn't support format query, so assume it does PCM */
3689         pa_format_info *f = pa_format_info_new();
3690         f->encoding = PA_ENCODING_PCM;
3692         ret = pa_idxset_new(NULL, NULL);
3693         pa_idxset_put(ret, f, NULL);
3699 /* Called from the main thread */
3700 /* Allows an external source to set what formats a sink supports if the sink
3701  * permits this. The function makes a copy of the formats on success. */
/* Delegates to the sink's optional set_formats() callback; the guard that
 * checks for its presence and the failure return are elided from this
 * view. */
3702 pa_bool_t pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3707         /* Sink supports setting formats -- let's give it a shot */
3708         return s->set_formats(s, formats);
3710         /* Sink doesn't support setting this -- bail out */
3714 /* Called from the main thread */
3715 /* Checks if the sink can accept this format */
/* Returns TRUE as soon as any of the sink's supported formats is compatible
 * with `f`.  The temporary format set is freed before returning; the
 * ret = TRUE assignment and the return statement are elided from this
 * view. */
3716 pa_bool_t pa_sink_check_format(pa_sink *s, pa_format_info *f)
3718     pa_idxset *formats = NULL;
3719     pa_bool_t ret = FALSE;
3724     formats = pa_sink_get_formats(s);
3727         pa_format_info *finfo_device;
/* Linear scan: first compatible device format decides the answer. */
3730         PA_IDXSET_FOREACH(finfo_device, formats, i) {
3731             if (pa_format_info_is_compatible(finfo_device, f)) {
/* We own the set returned by pa_sink_get_formats(); free members too. */
3737         pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3743 /* Called from the main thread */
3744 /* Calculates the intersection between formats supported by the sink and
3745 * in_formats, and returns these, in the order of the sink's formats. */
3746 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3747 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3748 pa_format_info *f_sink, *f_in;
3753 if (!in_formats || pa_idxset_isempty(in_formats))
3756 sink_formats = pa_sink_get_formats(s);
3758 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3759 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3760 if (pa_format_info_is_compatible(f_sink, f_in))
3761 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3767 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);