2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
31 #include <pulse/introspect.h>
32 #include <pulse/format.h>
33 #include <pulse/utf8.h>
34 #include <pulse/xmalloc.h>
35 #include <pulse/timeval.h>
36 #include <pulse/util.h>
37 #include <pulse/rtclock.h>
38 #include <pulse/internal.h>
40 #include <pulsecore/i18n.h>
41 #include <pulsecore/sink-input.h>
42 #include <pulsecore/namereg.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/core-subscribe.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/play-memblockq.h>
49 #include <pulsecore/flist.h>
/* Mixing/latency tunables for the sink core. */
#define MAX_MIX_CHANNELS 32                        /* max sink inputs mixed in one render pass */
#define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)           /* preferred render chunk size in bytes */
#define ABSOLUTE_MIN_LATENCY (500)                 /* usec (compared against PA_USEC values below) */
#define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
#define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)

PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);

/* One queued deferred hardware volume change (linked list node).
 * NOTE(review): struct members beyond the list fields are not visible here. */
struct pa_sink_volume_change {
    PA_LLIST_FIELDS(pa_sink_volume_change);

/* Message payload used by pa_sink_suspend() for PA_SINK_MESSAGE_SET_PORT. */
struct sink_message_set_port {

static void sink_free(pa_object *s);

/* Deferred-volume helpers, defined later in this file. */
static void pa_sink_volume_change_push(pa_sink *s);
static void pa_sink_volume_change_flush(pa_sink *s);
static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/* Initialise a pa_sink_new_data structure before it is filled in and
 * handed to pa_sink_new(). Returns the argument for call chaining. */
pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
    data->proplist = pa_proplist_new();
/* Set (copy) the name under which the sink will be registered. */
void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
    data->name = pa_xstrdup(name);
/* Copy the sample spec into the new-data struct; NULL marks it unset. */
void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
    /* The assignment result doubles as the "is set" flag. */
    if ((data->sample_spec_is_set = !!spec))
        data->sample_spec = *spec;
/* Copy the channel map into the new-data struct; NULL marks it unset. */
void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
    /* The assignment result doubles as the "is set" flag. */
    if ((data->channel_map_is_set = !!map))
        data->channel_map = *map;
/* Record the alternate sample rate the sink may later be switched to. */
void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
    data->alternate_sample_rate_is_set = TRUE;
    data->alternate_sample_rate = alternate_sample_rate;
/* Copy the initial volume into the new-data struct; NULL marks it unset. */
void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
    /* The assignment result doubles as the "is set" flag. */
    if ((data->volume_is_set = !!volume))
        data->volume = *volume;
/* Set the initial mute state, normalised to a strict 0/1 boolean. */
void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
    data->muted_is_set = TRUE;
    data->muted = !!mute;
/* Select the initially active port, replacing any earlier choice. */
void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
    pa_xfree(data->active_port);
    data->active_port = pa_xstrdup(port);
/* Release everything still owned by a pa_sink_new_data. Safe to call
 * after pa_sink_new() has stolen what it needed. */
void pa_sink_new_data_done(pa_sink_new_data *data) {
    pa_proplist_free(data->proplist);

    pa_device_port_hashmap_free(data->ports);

    pa_xfree(data->name);
    pa_xfree(data->active_port);
/* Called from main context */
/* Clear all implementor-provided callbacks; they are re-installed later
 * via the pa_sink_set_*_callback() setters. */
static void reset_callbacks(pa_sink *s) {
    s->get_volume = NULL;
    s->set_volume = NULL;
    s->write_volume = NULL;
    s->request_rewind = NULL;
    s->update_requested_latency = NULL;
    s->get_formats = NULL;
    s->set_formats = NULL;
    s->update_rate = NULL;
/* Called from main context */
/* Create a new sink from the filled-in 'data': registers the name, fires
 * the NEW and FIXATE hooks, validates sample spec / channel map / volume,
 * initialises all main-thread and thread_info state, and creates the
 * matching monitor source ("<name>.monitor"). Returns NULL on validation
 * failure (pa_return_null_if_fail). */
pa_sink* pa_sink_new(
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;

    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);

    pa_sink_new_data_set_name(data, name);

    /* Modules may veto or adjust the new sink here. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_namereg_unregister(core, name);

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Derive a default channel map when the caller did not provide one. */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    /* FIXME: There should probably be a general function for checking whether
     * the sink volume is allowed to be set, like there is for sink inputs. */
    pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    if (!data->volume_is_set) {
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);
        data->save_volume = FALSE;

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)

    pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to tweak the (now complete) data. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_namereg_unregister(core, name);

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->state = PA_SINK_INIT;
    s->suspend_cause = 0;
    pa_sink_set_mixer_dirty(s, FALSE);
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;
    s->default_sample_rate = s->sample_spec.rate;

    if (data->alternate_sample_rate_is_set)
        s->alternate_sample_rate = data->alternate_sample_rate;
        s->alternate_sample_rate = s->core->alternate_sample_rate;

    /* An alternate rate equal to the default is useless; disable it. */
    if (s->sample_spec.rate == s->alternate_sample_rate) {
        pa_log_warn("Default and alternate sample rates are the same.");
        s->alternate_sample_rate = 0;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->input_to_master = NULL;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    /* As a minor optimization we just steal the list instead of */
    s->ports = data->ports;

    s->active_port = NULL;
    s->save_port = FALSE;

    /* Prefer the explicitly configured port, if it exists... */
    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* ...otherwise fall back to the highest-priority port. */
    if (!s->active_port && s->ports) {

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,

    pa_cvolume_ramp_int_init(&s->ramp, PA_VOLUME_NORM, data->sample_spec.channels);

    /* IO-thread-side mirror state; kept in sync via messages later on. */
    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
    s->thread_info.volume_changes_tail = NULL;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
    s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
    s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;

    s->thread_info.ramp = s->ramp;

    /* FIXME: This should probably be moved to pa_sink_put() */
    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),

    /* Create the monitor source mirroring this sink's stream. */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    /* The monitor inherits only the latency-related flags from the sink. */
    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
/* Called from main context */
/* Transition the sink to a new lifetime state. Tells the implementor
 * (s->set_state), synchronously informs the IO thread via
 * PA_SINK_MESSAGE_SET_STATE (rolling back on failure), fires the
 * STATE_CHANGED hook/subscription event, and on suspend/resume notifies
 * or kills the attached sink inputs and syncs the monitor source. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert_ctl_context();

    if (s->state == state)

    original_state = s->state;

    /* True when crossing the suspended/opened boundary in either direction. */
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

        if ((ret = s->set_state(s, state)) < 0)

        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* IO thread refused: undo the implementor-side state change. */
                s->set_state(s, original_state);

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

    if (suspend_change) {

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
/* Install the implementor's "read hardware volume" callback. */
void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Install (or remove, cb==NULL) the implementor's "set hardware volume"
 * callback, updating PA_SINK_HW_VOLUME_CTRL and the decibel-volume flag
 * accordingly, and notifying clients if flags changed after init. */
void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
    pa_sink_flags_t flags;

    /* A write_volume callback is only meaningful with a set_volume one. */
    pa_assert(!s->write_volume || cb);

    /* Save the current flags so we can tell if they've changed */

    /* The sink implementor is responsible for setting decibel volume support */
        s->flags |= PA_SINK_HW_VOLUME_CTRL;
        s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
        /* See note below in pa_sink_put() about volume sharing and decibel volumes */
        pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    /* If the flags have changed after init, let any clients know via a change event */
    if (s->state != PA_SINK_INIT && flags != s->flags)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install (or remove, cb==NULL) the deferred-volume write callback and
 * toggle PA_SINK_DEFERRED_VOLUME to match, notifying clients on change. */
void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
    pa_sink_flags_t flags;

    /* write_volume requires a set_volume callback to pair with. */
    pa_assert(!cb || s->set_volume);

    s->write_volume = cb;

    /* Save the current flags so we can tell if they've changed */

        s->flags |= PA_SINK_DEFERRED_VOLUME;
        s->flags &= ~PA_SINK_DEFERRED_VOLUME;

    /* If the flags have changed after init, let any clients know via a change event */
    if (s->state != PA_SINK_INIT && flags != s->flags)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install the implementor's "read hardware mute" callback. */
void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Install (or remove, cb==NULL) the implementor's "set hardware mute"
 * callback and toggle PA_SINK_HW_MUTE_CTRL, notifying clients on change. */
void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
    pa_sink_flags_t flags;

    /* Save the current flags so we can tell if they've changed */

        s->flags |= PA_SINK_HW_MUTE_CTRL;
        s->flags &= ~PA_SINK_HW_MUTE_CTRL;

    /* If the flags have changed after init, let any clients know via a change event */
    if (s->state != PA_SINK_INIT && flags != s->flags)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable/disable PA_SINK_FLAT_VOLUME, gated by the server-wide
 * flat-volumes preference; notifies clients if the flag changed
 * after init. */
static void enable_flat_volume(pa_sink *s, pa_bool_t enable) {
    pa_sink_flags_t flags;

    /* Always follow the overall user preference here */
    enable = enable && s->core->flat_volumes;

    /* Save the current flags so we can tell if they've changed */

        s->flags |= PA_SINK_FLAT_VOLUME;
        s->flags &= ~PA_SINK_FLAT_VOLUME;

    /* If the flags have changed after init, let any clients know via a change event */
    if (s->state != PA_SINK_INIT && flags != s->flags)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable/disable PA_SINK_DECIBEL_VOLUME. Flat volume follows decibel
 * support, so it is toggled in lockstep. Notifies clients if the flags
 * changed after init. */
void pa_sink_enable_decibel_volume(pa_sink *s, pa_bool_t enable) {
    pa_sink_flags_t flags;

    /* Save the current flags so we can tell if they've changed */

        s->flags |= PA_SINK_DECIBEL_VOLUME;
        enable_flat_volume(s, TRUE);
        s->flags &= ~PA_SINK_DECIBEL_VOLUME;
        enable_flat_volume(s, FALSE);

    /* If the flags have changed after init, let any clients know via a change event */
    if (s->state != PA_SINK_INIT && flags != s->flags)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Called from main context */
/* Finish sink construction after the implementor configured it: verifies
 * the flag/callback invariants, resolves volume sharing against the
 * master sink, mirrors volume state into thread_info, moves the sink to
 * IDLE, publishes the monitor source, and fires the PUT hook. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || s->input_to_master);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow some volume related flags to be set
     * between _new() and _put() by the callback setter functions above.
     *
     * Thus we implement a couple safeguards here which ensure the above
     * setters were used (or at least the implementor made manual changes
     * in a compatible way).
     *
     * Note: All of these flags set here can change over the life time */
    pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
    pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
    pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);

    /* XXX: Currently decibel volume is disabled for all sinks that use volume
     * sharing. When the master sink supports decibel volume, it would be good
     * to have the flag also in the filter sink, but currently we don't do that
     * so that the flags of the filter sink never change when it's moved from
     * a master sink to another. One solution for this problem would be to
     * remove user-visible volume altogether from filter sinks when volume
     * sharing is used, but the current approach was easier to implement... */
    /* We always support decibel volumes in software, otherwise we leave it to
     * the sink implementor to set this flag as needed.
     *
     * Note: This flag can also change over the life time of the sink. */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        pa_sink_enable_decibel_volume(s, TRUE);

    /* If the sink implementor support DB volumes by itself, we should always
     * try and enable flat volumes too */
    if ((s->flags & PA_SINK_DECIBEL_VOLUME))
        enable_flat_volume(s, TRUE);

    if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
        pa_sink *root_sink = pa_sink_get_master(s);

        pa_assert(root_sink);

        /* Filter sinks mirror the master's volumes, remapped to our map. */
        s->reference_volume = root_sink->reference_volume;
        pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);

        s->real_volume = root_sink->real_volume;
        pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);

        /* We assume that if the sink implementor changed the default
         * volume he did so in real_volume, because that is the usual
         * place where he is supposed to place his changes. */
        s->reference_volume = s->real_volume;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);

    /* Sanity-check that the implementor left all invariants intact. */
    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
              || (s->base_volume == PA_VOLUME_NORM
                  && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
/* Called from main context */
/* Detach the sink from the core: fires UNLINK, unregisters the name,
 * removes it from the core/card idxsets, kills remaining inputs, moves
 * the state to UNLINKED, unlinks the monitor source, and fires the
 * REMOVE event plus UNLINK_POST hook. */
void pa_sink_unlink(pa_sink* s) {
    pa_sink_input *i, *j = NULL;

    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad */

    linked = PA_SINK_IS_LINKED(s->state);

        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Kill every input still attached to us. */
    while ((i = pa_idxset_first(s->inputs, NULL))) {

        pa_sink_input_kill(i);

        sink_set_state(s, PA_SINK_UNLINKED);

        s->state = PA_SINK_UNLINKED;

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
/* Called from main context */
/* Final destructor, invoked when the refcount hits zero (installed as
 * parent.parent.free in pa_sink_new()). Releases the monitor source,
 * input sets, silence block, proplist and ports. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);

    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;

    pa_idxset_free(s->inputs, NULL, NULL);

    /* Drop the IO-side references still held in thread_info. */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_proplist_free(s->proplist);

        pa_device_port_hashmap_free(s->ports);
/* Called from main context, and not while the IO thread is active, please */
/* Set the message queue used to talk to the IO thread, propagating it
 * to the monitor source. */
void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (s->monitor_source)
        pa_source_set_asyncmsgq(s->monitor_source, q);
760 /* Called from main context, and not while the IO thread is active, please */
761 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
762 pa_sink_assert_ref(s);
763 pa_assert_ctl_context();
768 /* For now, allow only a minimal set of flags to be changed. */
769 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
771 s->flags = (s->flags & ~mask) | (value & mask);
773 pa_source_update_flags(s->monitor_source,
774 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
775 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
776 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
777 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
/* Called from IO context, or before _put() from main context */
/* Set the rtpoll object the IO thread runs on, propagating it to the
 * monitor source. */
void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    s->thread_info.rtpoll = p;

    if (s->monitor_source)
        pa_source_set_rtpoll(s->monitor_source, p);
/* Called from main context */
/* Re-evaluate RUNNING vs IDLE based on whether anyone uses the sink.
 * A suspended sink is left alone. */
int pa_sink_update_status(pa_sink*s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->state == PA_SINK_SUSPENDED)

    return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
/* Called from any context - must be threadsafe */
/* Mark the hardware mixer state as out of sync (atomically); picked up
 * and cleared in pa_sink_suspend() once the mixer is accessible again. */
void pa_sink_set_mixer_dirty(pa_sink *s, pa_bool_t is_dirty)
    pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
/* Called from main context */
/* Add or remove a suspend cause. If the mixer became accessible again
 * (dirty flag set while session-suspended), re-applies the active port
 * — via an IO-thread message under deferred volume, directly otherwise.
 * Finally moves the sink to SUSPENDED or back to RUNNING/IDLE. */
int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(cause != 0);

        s->suspend_cause |= cause;
        s->monitor_source->suspend_cause |= cause;
        s->suspend_cause &= ~cause;
        s->monitor_source->suspend_cause &= ~cause;

    if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
        /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
           it'll be handled just fine. */
        pa_sink_set_mixer_dirty(s, FALSE);
        pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
        if (s->active_port && s->set_port) {
            if (s->flags & PA_SINK_DEFERRED_VOLUME) {
                struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
                pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
                s->set_port(s, s->active_port);

    /* Nothing to do when the effective state already matches the causes. */
    if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)

    pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return sink_set_state(s, PA_SINK_SUSPENDED);

        return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
/* Called from main context */
/* Begin moving every input away from this sink: each input that accepts
 * start_move is referenced and queued; the queue is later handed to
 * pa_sink_move_all_finish() or pa_sink_move_all_fail(). */
pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
    pa_sink_input *i, *n;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Fetch the successor first: start_move may detach i from s->inputs. */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
        n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));

        pa_sink_input_ref(i);

        if (pa_sink_input_start_move(i) >= 0)

            pa_sink_input_unref(i);
/* Called from main context */
/* Complete a bulk move started with pa_sink_move_all_start(): attach each
 * queued input to sink 's' (failing individual inputs that refuse) and
 * drop the references taken at start. Frees the queue. */
void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
        if (pa_sink_input_finish_move(i, s, save) < 0)
            pa_sink_input_fail_move(i);

        pa_sink_input_unref(i);

    pa_queue_free(q, NULL);
/* Called from main context */
/* Abort a bulk move: fail every queued input and release the references
 * taken at start. Frees the queue. */
void pa_sink_move_all_fail(pa_queue *q) {

    pa_assert_ctl_context();

    while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
        pa_sink_input_fail_move(i);
        pa_sink_input_unref(i);

    pa_queue_free(q, NULL);
/* Called from IO thread context */
/* Execute a rewind of 'nbytes' on the IO thread: resets the request
 * state, rewinds any pending deferred volume changes, forwards the
 * rewind to every attached input, and finally to the monitor source. */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* If nobody requested this and this is actually no real rewind
     * then we can short cut this. Please note that this means that
     * not all rewind requests triggered upstream will always be
     * translated in actual requests! */
    if (!s->thread_info.rewind_requested && nbytes <= 0)

    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;

    if (s->thread_info.state == PA_SINK_SUSPENDED)

        pa_log_debug("Processing rewind...");
        if (s->flags & PA_SINK_DEFERRED_VOLUME)
            pa_sink_volume_change_rewind(s, nbytes);

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_process_rewind(i, nbytes);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
            pa_source_process_rewind(s->monitor_source, nbytes);
/* Called from IO thread context */
/* Peek up to 'maxinfo' chunks from the attached inputs into 'info',
 * shrinking *length to the shortest chunk seen; silent chunks are
 * dropped immediately. Each stored entry holds a reference to its
 * input in info->userdata. Returns the number of entries filled. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        /* Clamp the mix length to the shortest contribution. */
        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Pure silence adds nothing to the mix; skip it early. */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);

        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);
/* Called from IO thread context */
/* After mixing: advance every input by result->length, feed per-input
 * data to direct outputs on the monitor source (re-applying the input's
 * volume where needed), drop the chunk references collected by
 * fill_mix_info(), and post the mixed result to the monitor source. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array */
        for (j = 0; j < n; j ++) {
            if (info[p].userdata == i) {

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;

                if (m && m->chunk.memblock) {
                    /* Use this input's own (pre-mix) chunk for its direct outputs. */
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);

                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);

                pa_memblock_unref(c.memblock);

            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
/* Called from IO thread context */
/* Render up to 'length' bytes of mixed audio into *result. Suspended
 * sinks return silence. With no inputs the cached silence block is
 * returned; with exactly one input its chunk is reused (zero-copy)
 * with soft volume/mute and any ramp applied; with several inputs a
 * fresh block is allocated and pa_mix() combines them. Ends by
 * advancing all inputs via inputs_drop(). */
void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    size_t block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(pa_frame_aligned(length, &s->sample_spec));

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        result->memblock = pa_memblock_ref(s->silence.memblock);
        result->index = s->silence.index;
        result->length = PA_MIN(s->silence.length, length);

        length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);

    /* Never render more than one mempool block can hold. */
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

        /* No inputs: hand out the shared silence block. */
        *result = s->silence;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

    } else if (n == 1) {

        /* Single input: reuse its chunk directly (zero copy). */
        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
            pa_memblock_unref(result->memblock);
            pa_silence_memchunk_get(&s->core->silence_cache,

        } else if (!pa_cvolume_is_norm(&volume) || pa_cvolume_ramp_target_active(&s->thread_info.ramp) || pa_cvolume_ramp_active(&s->thread_info.ramp)) {
            pa_memchunk_make_writable(result, 0);
            if (pa_cvolume_ramp_active(&s->thread_info.ramp)) {
                if (!pa_cvolume_is_norm(&volume))
                    pa_volume_memchunk(result, &s->sample_spec, &volume);
                pa_volume_ramp_memchunk(result, &s->sample_spec, &(s->thread_info.ramp));

                if (pa_cvolume_ramp_target_active(&s->thread_info.ramp)) {
                    pa_cvolume_ramp_get_targets(&s->thread_info.ramp, &target);
                    pa_sw_cvolume_multiply(&volume, &volume, &target);
                pa_volume_memchunk(result, &s->sample_spec, &volume);

        pa_cvolume target_vol;

        /* Multiple inputs: allocate a fresh block and mix into it. */
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);
        result->length = pa_mix(info, n,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        if (pa_cvolume_ramp_target_active(&s->thread_info.ramp) || pa_cvolume_ramp_active(&s->thread_info.ramp)) {
            if (pa_cvolume_ramp_active(&s->thread_info.ramp))
                pa_volume_ramp_memchunk(result, &s->sample_spec, &(s->thread_info.ramp));

                pa_cvolume_ramp_get_targets(&s->thread_info.ramp, &target_vol);
                pa_volume_memchunk(result, &s->sample_spec, &target_vol);

        pa_memblock_release(result->memblock);

    inputs_drop(s, info, n, result);
1201 /* Called from IO thread context */
/* Renders mixed audio from the sink's inputs directly into the caller-supplied
 * memchunk 'target'.  A suspended sink is silenced instead of mixed.
 * NOTE(review): this listing elides some original lines (gaps in the embedded
 * numbering, e.g. the n == 0 branch header); comments describe only what is
 * visible here. */
1202 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1203 pa_mix_info info[MAX_MIX_CHANNELS];
1205 size_t length, block_size_max;
1207 pa_sink_assert_ref(s);
1208 pa_sink_assert_io_context(s);
1209 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1211 pa_assert(target->memblock);
1212 pa_assert(target->length > 0);
1213 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1215 pa_assert(!s->thread_info.rewind_requested);
1216 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended sink: emit silence only. */
1218 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1219 pa_silence_memchunk(target, &s->sample_spec);
1225 length = target->length;
1226 block_size_max = pa_mempool_block_size_max(s->core->mempool);
/* Never process more than one mempool block at a time; keep it frame-aligned. */
1227 if (length > block_size_max)
1228 length = pa_frame_align(block_size_max, &s->sample_spec);
1230 pa_assert(length > 0);
/* Gather the next chunk of up to MAX_MIX_CHANNELS inputs; n = input count. */
1232 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1235 if (target->length > length)
1236 target->length = length;
/* Presumably the n == 0 case (condition elided): no inputs -> silence. */
1238 pa_silence_memchunk(target, &s->sample_spec);
1239 } else if (n == 1) {
/* Single input: combine sink soft volume with the input volume. */
1242 if (target->length > length)
1243 target->length = length;
1245 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1247 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1248 pa_silence_memchunk(target, &s->sample_spec);
1251 pa_cvolume target_vol;
/* Take a private reference of the input chunk so it can be scaled in place. */
1253 vchunk = info[0].chunk;
1254 pa_memblock_ref(vchunk.memblock);
1256 if (vchunk.length > length)
1257 vchunk.length = length;
/* Apply software volume and/or an active volume ramp before copying out. */
1259 if (!pa_cvolume_is_norm(&volume) || pa_cvolume_ramp_target_active(&s->thread_info.ramp) || pa_cvolume_ramp_active(&s->thread_info.ramp)) {
1260 pa_memchunk_make_writable(&vchunk, 0);
1261 if (pa_cvolume_ramp_active(&s->thread_info.ramp)) {
1262 if (!pa_cvolume_is_norm(&volume))
1263 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1264 pa_volume_ramp_memchunk(&vchunk, &s->sample_spec, &(s->thread_info.ramp));
1267 if (pa_cvolume_ramp_target_active(&s->thread_info.ramp)) {
1268 pa_cvolume_ramp_get_targets(&s->thread_info.ramp, &target_vol);
1269 pa_sw_cvolume_multiply(&volume, &volume, &target_vol);
1271 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1275 pa_memchunk_memcpy(target, &vchunk);
1276 pa_memblock_unref(vchunk.memblock);
/* n > 1: mix all inputs straight into the target memblock. */
1281 pa_cvolume target_vol;
1283 ptr = pa_memblock_acquire(target->memblock);
1285 target->length = pa_mix(info, n,
1286 (uint8_t*) ptr + target->index, length,
1288 &s->thread_info.soft_volume,
1289 s->thread_info.soft_muted);
/* Post-process the freshly mixed data with ramp / ramp-target volume. */
1291 if (pa_cvolume_ramp_target_active(&s->thread_info.ramp) || pa_cvolume_ramp_active(&s->thread_info.ramp)) {
1292 if (pa_cvolume_ramp_active(&s->thread_info.ramp))
1293 pa_volume_ramp_memchunk(target, &s->sample_spec, &(s->thread_info.ramp));
1295 pa_cvolume_ramp_get_targets(&s->thread_info.ramp, &target_vol);
1296 pa_volume_memchunk(target, &s->sample_spec, &target_vol);
1300 pa_memblock_release(target->memblock);
/* Advance the inputs past the data just consumed. */
1303 inputs_drop(s, info, n, target);
1308 /* Called from IO thread context */
/* Like pa_sink_render_into(), but guarantees the entire target chunk is
 * filled, repeatedly calling pa_sink_render_into() on the remaining tail.
 * NOTE(review): the loop scaffolding around line 1338 is elided in this
 * listing. */
1309 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1313 pa_sink_assert_ref(s);
1314 pa_sink_assert_io_context(s);
1315 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1317 pa_assert(target->memblock);
1318 pa_assert(target->length > 0);
1319 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1321 pa_assert(!s->thread_info.rewind_requested);
1322 pa_assert(s->thread_info.rewind_nbytes == 0);
/* A suspended sink produces silence only. */
1324 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1325 pa_silence_memchunk(target, &s->sample_spec);
1338 pa_sink_render_into(s, &chunk);
1347 /* Called from IO thread context */
/* Renders exactly 'length' bytes into *result: first a normal
 * pa_sink_render() pass, then — if that came up short — the remainder is
 * filled in place via pa_sink_render_into_full(). */
1348 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1349 pa_sink_assert_ref(s);
1350 pa_sink_assert_io_context(s);
1351 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1352 pa_assert(length > 0);
1353 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1356 pa_assert(!s->thread_info.rewind_requested);
1357 pa_assert(s->thread_info.rewind_nbytes == 0);
1361 pa_sink_render(s, length, result);
/* Short render: make the block writable at full size and top up the tail. */
1363 if (result->length < length) {
1366 pa_memchunk_make_writable(result, length);
1368 chunk.memblock = result->memblock;
1369 chunk.index = result->index + result->length;
1370 chunk.length = length - result->length;
1372 pa_sink_render_into_full(s, &chunk);
/* The chunk is now complete. */
1374 result->length = length;
1380 /* Called from main thread */
/* Attempts to switch the sink to a new sample rate via the implementor's
 * update_rate() callback.  The chosen rate is negotiated between the
 * requested stream rate and the sink's default/alternate rates.
 * NOTE(review): several branches (early returns, the use_alternate
 * assignments) are elided in this listing; comments cover visible lines. */
1381 pa_bool_t pa_sink_update_rate(pa_sink *s, uint32_t rate, pa_bool_t passthrough)
1383 if (s->update_rate) {
1384 uint32_t desired_rate = rate;
1385 uint32_t default_rate = s->default_sample_rate;
1386 uint32_t alternate_rate = s->alternate_sample_rate;
1389 pa_bool_t use_alternate = FALSE;
1391 if (PA_UNLIKELY(default_rate == alternate_rate)) {
1392 pa_log_warn("Default and alternate sample rates are the same.");
/* Never change the rate while the sink or its monitor source is running. */
1396 if (PA_SINK_IS_RUNNING(s->state)) {
1397 pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
1398 s->sample_spec.rate);
1402 if (s->monitor_source) {
1403 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == TRUE) {
1404 pa_log_info("Cannot update rate, monitor source is RUNNING");
/* Reject out-of-range requests. */
1409 if (PA_UNLIKELY (desired_rate < 8000 ||
1410 desired_rate > PA_RATE_MAX))
/* NOTE(review): these assertions pass whenever the rate is NOT a common
 * multiple of both families (they use '%' truthiness, not '== 0') —
 * confirm against upstream that this is the intended sanity check. */
1414 pa_assert(default_rate % 4000 || default_rate % 11025);
1415 pa_assert(alternate_rate % 4000 || alternate_rate % 11025);
/* Choose the alternate rate when the request belongs to the other
 * rate family (4000-multiples vs. 11025-multiples) than the default. */
1417 if (default_rate % 4000) {
1418 /* default is a 11025 multiple */
1419 if ((alternate_rate % 4000 == 0) && (desired_rate % 4000 == 0))
1422 /* default is 4000 multiple */
1423 if ((alternate_rate % 11025 == 0) && (desired_rate % 11025 == 0))
1428 desired_rate = alternate_rate;
1430 desired_rate = default_rate;
1432 desired_rate = rate; /* use stream sampling rate, discard default/alternate settings */
/* Non-passthrough rate switches are only allowed when no stream uses us. */
1435 if (!passthrough && pa_sink_used_by(s) > 0)
1438 pa_sink_suspend(s, TRUE, PA_SUSPEND_IDLE); /* needed before rate update, will be resumed automatically */
1440 if (s->update_rate(s, desired_rate) == TRUE) {
1441 /* update monitor source as well */
1442 if (s->monitor_source && !passthrough)
1443 pa_source_update_rate(s->monitor_source, desired_rate, FALSE);
1444 pa_log_info("Changed sampling rate successfully");
/* Corked inputs must refresh their resamplers for the new rate. */
1446 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1447 if (i->state == PA_SINK_INPUT_CORKED)
1448 pa_sink_input_update_rate(i);
1457 /* Called from main thread */
/* Returns the current sink latency by sending a synchronous GET_LATENCY
 * message to the IO thread.  Suspended sinks and sinks without the
 * PA_SINK_LATENCY capability short-circuit (return statements elided in
 * this listing). */
1458 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1461 pa_sink_assert_ref(s);
1462 pa_assert_ctl_context();
1463 pa_assert(PA_SINK_IS_LINKED(s->state));
1465 /* The returned value is supposed to be in the time domain of the sound card! */
1467 if (s->state == PA_SINK_SUSPENDED)
1470 if (!(s->flags & PA_SINK_LATENCY))
/* Blocks until the IO thread has filled in 'usec'. */
1473 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1478 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): invokes process_msg()
 * directly instead of round-tripping through the asyncmsgq. */
1479 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1483 pa_sink_assert_ref(s);
1484 pa_sink_assert_io_context(s);
1485 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1487 /* The returned value is supposed to be in the time domain of the sound card! */
1489 if (s->thread_info.state == PA_SINK_SUSPENDED)
1492 if (!(s->flags & PA_SINK_LATENCY))
1495 o = PA_MSGOBJECT(s);
1497 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
/* A failing handler is tolerated here (fallback return elided in listing). */
1499 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1505 /* Called from the main thread (and also from the IO thread while the main
1506 * thread is waiting).
1508 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1509 * set. Instead, flat volume mode is detected by checking whether the root sink
1510 * has the flag set. */
1511 pa_bool_t pa_sink_flat_volume_enabled(pa_sink *s) {
1512 pa_sink_assert_ref(s);
/* Walk to the root of the volume-sharing chain before checking the flag. */
1514 s = pa_sink_get_master(s);
1517 return (s->flags & PA_SINK_FLAT_VOLUME);
1522 /* Called from the main thread (and also from the IO thread while the main
1523 * thread is waiting). */
/* Follows the input_to_master chain of volume-sharing filter sinks and
 * returns the root sink.  May yield NULL if a link in the chain is missing
 * (early-exit line elided in this listing). */
1524 pa_sink *pa_sink_get_master(pa_sink *s) {
1525 pa_sink_assert_ref(s);
1527 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1528 if (PA_UNLIKELY(!s->input_to_master))
1531 s = s->input_to_master->sink;
1537 /* Called from main context */
/* TRUE iff the sink's one-and-only connected input is a passthrough
 * stream; a passthrough input can never share the sink with others. */
1538 pa_bool_t pa_sink_is_passthrough(pa_sink *s) {
1539 pa_sink_input *alt_i;
1542 pa_sink_assert_ref(s);
1544 /* one and only one PASSTHROUGH input can possibly be connected */
1545 if (pa_idxset_size(s->inputs) == 1) {
1546 alt_i = pa_idxset_first(s->inputs, &idx);
1548 if (pa_sink_input_is_passthrough(alt_i))
1555 /* Called from main context */
/* Prepares the sink for a passthrough stream: suspends the monitor source
 * and clamps the volume to at most 0 dB, saving the previous volume so
 * pa_sink_leave_passthrough() can restore it. */
1556 void pa_sink_enter_passthrough(pa_sink *s) {
1559 /* disable the monitor in passthrough mode */
1560 if (s->monitor_source)
1561 pa_source_suspend(s->monitor_source, TRUE, PA_SUSPEND_PASSTHROUGH);
1563 /* set the volume to NORM */
1564 s->saved_volume = *pa_sink_get_volume(s, TRUE);
1565 s->saved_save_volume = s->save_volume;
/* base_volume may be below NORM; never amplify in passthrough mode. */
1567 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1568 pa_sink_set_volume(s, &volume, TRUE, FALSE);
1571 /* Called from main context */
/* Undoes pa_sink_enter_passthrough(): resumes the monitor source and
 * restores the volume that was saved on entry, then clears the saved
 * state so it is not applied twice. */
1572 void pa_sink_leave_passthrough(pa_sink *s) {
1573 /* Unsuspend monitor */
1574 if (s->monitor_source)
1575 pa_source_suspend(s->monitor_source, FALSE, PA_SUSPEND_PASSTHROUGH);
1577 /* Restore sink volume to what it was before we entered passthrough mode */
1578 pa_sink_set_volume(s, &s->saved_volume, TRUE, s->saved_save_volume);
1580 pa_cvolume_init(&s->saved_volume);
1581 s->saved_save_volume = FALSE;
1584 /* Called from main context. */
/* Recomputes i->reference_ratio = i->volume / i->sink->reference_volume
 * per channel, after remapping the sink's reference volume into the
 * input's channel map.  Muted sink channels are skipped, and a channel is
 * only rewritten when the stored ratio no longer reproduces i->volume. */
1585 static void compute_reference_ratio(pa_sink_input *i) {
1587 pa_cvolume remapped;
1590 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1593 * Calculates the reference ratio from the sink's reference
1594 * volume. This basically calculates:
1596 * i->reference_ratio = i->volume / i->sink->reference_volume
1599 remapped = i->sink->reference_volume;
1600 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1602 i->reference_ratio.channels = i->sample_spec.channels;
1604 for (c = 0; c < i->sample_spec.channels; c++) {
1606 /* We don't update when the sink volume is 0 anyway */
1607 if (remapped.values[c] <= PA_VOLUME_MUTED)
1610 /* Don't update the reference ratio unless necessary */
1611 if (pa_sw_volume_multiply(
1612 i->reference_ratio.values[c],
1613 remapped.values[c]) == i->volume.values[c])
1616 i->reference_ratio.values[c] = pa_sw_volume_divide(
1617 i->volume.values[c],
1618 remapped.values[c]);
1622 /* Called from main context. Only called for the root sink in volume sharing
1623 * cases, except for internal recursive calls. */
/* Recomputes the reference ratio of every input of 's', recursing into
 * volume-sharing filter sinks hanging off those inputs. */
1624 static void compute_reference_ratios(pa_sink *s) {
1628 pa_sink_assert_ref(s);
1629 pa_assert_ctl_context();
1630 pa_assert(PA_SINK_IS_LINKED(s->state));
1631 pa_assert(pa_sink_flat_volume_enabled(s));
1633 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1634 compute_reference_ratio(i);
/* Descend into filter sinks that share our volume. */
1636 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1637 compute_reference_ratios(i->origin_sink);
1641 /* Called from main context. Only called for the root sink in volume sharing
1642 * cases, except for internal recursive calls. */
/* Recomputes, for each input, real_ratio = i->volume / s->real_volume and
 * soft_volume = real_ratio * volume_factor.  Volume-sharing filter-sink
 * inputs get a fixed 0 dB real ratio and recurse instead. */
1643 static void compute_real_ratios(pa_sink *s) {
1647 pa_sink_assert_ref(s);
1648 pa_assert_ctl_context();
1649 pa_assert(PA_SINK_IS_LINKED(s->state));
1650 pa_assert(pa_sink_flat_volume_enabled(s));
1652 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1654 pa_cvolume remapped;
1656 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1657 /* The origin sink uses volume sharing, so this input's real ratio
1658 * is handled as a special case - the real ratio must be 0 dB, and
1659 * as a result i->soft_volume must equal i->volume_factor. */
1660 pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1661 i->soft_volume = i->volume_factor;
1663 compute_real_ratios(i->origin_sink);
1669 * This basically calculates:
1671 * i->real_ratio := i->volume / s->real_volume
1672 * i->soft_volume := i->real_ratio * i->volume_factor
/* Bring the sink's real volume into the input's channel map first. */
1675 remapped = s->real_volume;
1676 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1678 i->real_ratio.channels = i->sample_spec.channels;
1679 i->soft_volume.channels = i->sample_spec.channels;
1681 for (c = 0; c < i->sample_spec.channels; c++) {
1683 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1684 /* We leave i->real_ratio untouched */
1685 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1689 /* Don't lose accuracy unless necessary */
1690 if (pa_sw_volume_multiply(
1691 i->real_ratio.values[c],
1692 remapped.values[c]) != i->volume.values[c])
1694 i->real_ratio.values[c] = pa_sw_volume_divide(
1695 i->volume.values[c],
1696 remapped.values[c]);
1698 i->soft_volume.values[c] = pa_sw_volume_multiply(
1699 i->real_ratio.values[c],
1700 i->volume_factor.values[c]);
1703 /* We don't copy the soft_volume to the thread_info data
1704 * here. That must be done by the caller */
/* Remaps cvolume 'v' from channel map 'from' to 'to' while disturbing
 * other streams as little as possible: if 'template' is already a valid
 * remapping of 'v' it is reused; otherwise an all-channel (max) volume is
 * set.  Returns 'v' (return statements elided in this listing). */
1708 static pa_cvolume *cvolume_remap_minimal_impact(
1710 const pa_cvolume *template,
1711 const pa_channel_map *from,
1712 const pa_channel_map *to) {
1717 pa_assert(template);
1720 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1721 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1723 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1724 * mapping from sink input to sink volumes:
1726 * If template is a possible remapping from v it is used instead
1727 * of remapping anew.
1729 * If the channel maps don't match we set an all-channel volume on
1730 * the sink to ensure that changing a volume on one stream has no
1731 * effect that cannot be compensated for in another stream that
1732 * does not have the same channel map as the sink. */
1734 if (pa_channel_map_equal(from, to))
/* Round-trip check: does template map back onto v? */
1738 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
/* Fallback: flatten to a uniform volume across all target channels. */
1743 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1747 /* Called from main thread. Only called for the root sink in volume sharing
1748 * cases, except for internal recursive calls. */
/* Merges the volumes of all (recursively reachable) inputs into
 * *max_volume, remapped into 'channel_map' with minimal impact. */
1749 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1753 pa_sink_assert_ref(s);
1754 pa_assert(max_volume);
1755 pa_assert(channel_map);
1756 pa_assert(pa_sink_flat_volume_enabled(s));
1758 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1759 pa_cvolume remapped;
1761 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1762 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1764 /* Ignore this input. The origin sink uses volume sharing, so this
1765 * input's volume will be set to be equal to the root sink's real
1766 * volume. Obviously this input's current volume must not then
1767 * affect what the root sink's real volume will be. */
1771 remapped = i->volume;
1772 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1773 pa_cvolume_merge(max_volume, max_volume, &remapped);
1777 /* Called from main thread. Only called for the root sink in volume sharing
1778 * cases, except for internal recursive calls. */
/* TRUE iff the sink has at least one "real" input, looking through
 * volume-sharing filter sinks recursively. */
1779 static pa_bool_t has_inputs(pa_sink *s) {
1783 pa_sink_assert_ref(s);
1785 PA_IDXSET_FOREACH(i, s->inputs, idx) {
/* A filter-sink input only counts if the filter sink itself has inputs. */
1786 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1793 /* Called from main thread. Only called for the root sink in volume sharing
1794 * cases, except for internal recursive calls. */
/* Stores *new_volume (remapped to the sink's channel map) as s->real_volume
 * and propagates it down to volume-sharing filter sinks, updating their
 * inputs' volumes and reference ratios along the way. */
1795 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1799 pa_sink_assert_ref(s);
1800 pa_assert(new_volume);
1801 pa_assert(channel_map);
1803 s->real_volume = *new_volume;
1804 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1806 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1807 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1808 if (pa_sink_flat_volume_enabled(s)) {
1809 pa_cvolume old_volume = i->volume;
1811 /* Follow the root sink's real volume. */
1812 i->volume = *new_volume;
1813 pa_cvolume_remap(&i->volume, channel_map, &i->channel_map);
1814 compute_reference_ratio(i);
1816 /* The volume changed, let's tell people so */
1817 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1818 if (i->volume_changed)
1819 i->volume_changed(i);
1821 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
/* Recurse into the filter sink that shares our volume. */
1825 update_real_volume(i->origin_sink, new_volume, channel_map);
1830 /* Called from main thread. Only called for the root sink in shared volume
/* Determines the maximum volume over all connected streams and installs it
 * as s->real_volume, then refreshes the inputs' real ratios.  With no
 * inputs attached the current reference volume is kept as-is. */
1832 static void compute_real_volume(pa_sink *s) {
1833 pa_sink_assert_ref(s);
1834 pa_assert_ctl_context();
1835 pa_assert(PA_SINK_IS_LINKED(s->state));
1836 pa_assert(pa_sink_flat_volume_enabled(s));
1837 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
1839 /* This determines the maximum volume of all streams and sets
1840 * s->real_volume accordingly. */
1842 if (!has_inputs(s)) {
1843 /* In the special case that we have no sink inputs we leave the
1844 * volume unmodified. */
1845 update_real_volume(s, &s->reference_volume, &s->channel_map);
/* Start from silence and merge every input volume on top. */
1849 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1851 /* First let's determine the new maximum volume of all inputs
1852 * connected to this sink */
1853 get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
1854 update_real_volume(s, &s->real_volume, &s->channel_map);
1856 /* Then, let's update the real ratios/soft volumes of all inputs
1857 * connected to this sink */
1858 compute_real_ratios(s);
1861 /* Called from main thread. Only called for the root sink in shared volume
1862 * cases, except for internal recursive calls. */
/* After a sink-level reference-volume change, recomputes each input's
 * volume as reference_volume * reference_ratio and notifies listeners. */
1863 static void propagate_reference_volume(pa_sink *s) {
1867 pa_sink_assert_ref(s);
1868 pa_assert_ctl_context();
1869 pa_assert(PA_SINK_IS_LINKED(s->state));
1870 pa_assert(pa_sink_flat_volume_enabled(s));
1872 /* This is called whenever the sink volume changes that is not
1873 * caused by a sink input volume change. We need to fix up the
1874 * sink input volumes accordingly */
1876 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1877 pa_cvolume old_volume;
1879 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1880 propagate_reference_volume(i->origin_sink);
1882 /* Since the origin sink uses volume sharing, this input's volume
1883 * needs to be updated to match the root sink's real volume, but
1884 * that will be done later in update_shared_real_volume(). */
1888 old_volume = i->volume;
1890 /* This basically calculates:
1892 * i->volume := s->reference_volume * i->reference_ratio */
1894 i->volume = s->reference_volume;
1895 pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
1896 pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);
1898 /* The volume changed, let's tell people so */
1899 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1901 if (i->volume_changed)
1902 i->volume_changed(i);
1904 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1909 /* Called from main thread. Only called for the root sink in volume sharing
1910 * cases, except for internal recursive calls. The return value indicates
1911 * whether any reference volume actually changed. */
/* Installs *v (remapped from 'channel_map') as s->reference_volume, posts a
 * change event when it actually changed, and pushes the new reference
 * volume down to volume-sharing filter sinks. */
1912 static pa_bool_t update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
1914 pa_bool_t reference_volume_changed;
1918 pa_sink_assert_ref(s);
1919 pa_assert(PA_SINK_IS_LINKED(s->state));
1921 pa_assert(channel_map);
1922 pa_assert(pa_cvolume_valid(v));
1925 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
1927 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
1928 s->reference_volume = volume;
/* Keep save_volume sticky: only an actual change or an explicit 'save'
 * request may alter it. */
1930 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
1932 if (reference_volume_changed)
1933 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1934 else if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1935 /* If the root sink's volume doesn't change, then there can't be any
1936 * changes in the other sinks in the sink tree either.
1938 * It's probably theoretically possible that even if the root sink's
1939 * volume changes slightly, some filter sink doesn't change its volume
1940 * due to rounding errors. If that happens, we still want to propagate
1941 * the changed root sink volume to the sinks connected to the
1942 * intermediate sink that didn't change its volume. This theoretical
1943 * possibility is the reason why we have that !(s->flags &
1944 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
1945 * notice even if we returned here FALSE always if
1946 * reference_volume_changed is FALSE. */
1949 PA_IDXSET_FOREACH(i, s->inputs, idx) {
/* Recurse into filter sinks; 'save' is never forwarded downwards. */
1950 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1951 update_reference_volume(i->origin_sink, v, channel_map, FALSE);
1957 /* Called from main thread */
/* Sets the sink volume (or, with volume == NULL in flat-volume mode,
 * re-synchronizes sink volumes from the stream volumes).  The change is
 * always applied to the root sink of a volume-sharing chain and then
 * propagated.  Passthrough sinks only accept a reset to 0 dB. */
1958 void pa_sink_set_volume(
1960 const pa_cvolume *volume,
1964 pa_cvolume new_reference_volume;
1967 pa_sink_assert_ref(s);
1968 pa_assert_ctl_context();
1969 pa_assert(PA_SINK_IS_LINKED(s->state));
1970 pa_assert(!volume || pa_cvolume_valid(volume));
1971 pa_assert(volume || pa_sink_flat_volume_enabled(s));
1972 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1974 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
1975 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
1976 if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
1977 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
1981 /* In case of volume sharing, the volume is set for the root sink first,
1982 * from which it's then propagated to the sharing sinks. */
1983 root_sink = pa_sink_get_master(s);
1985 if (PA_UNLIKELY(!root_sink))
1988 /* As a special exception we accept mono volumes on all sinks --
1989 * even on those with more complex channel maps */
1992 if (pa_cvolume_compatible(volume, &s->sample_spec))
1993 new_reference_volume = *volume;
/* Mono volume: scale the existing per-channel balance uniformly. */
1995 new_reference_volume = s->reference_volume;
1996 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
1999 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2001 if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2002 if (pa_sink_flat_volume_enabled(root_sink)) {
2003 /* OK, propagate this volume change back to the inputs */
2004 propagate_reference_volume(root_sink);
2006 /* And now recalculate the real volume */
2007 compute_real_volume(root_sink);
/* Non-flat-volume path: real volume simply tracks the reference. */
2009 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2013 /* If volume is NULL we synchronize the sink's real and
2014 * reference volumes with the stream volumes. */
2016 pa_assert(pa_sink_flat_volume_enabled(root_sink));
2018 /* Ok, let's determine the new real volume */
2019 compute_real_volume(root_sink);
2021 /* Let's 'push' the reference volume if necessary */
2022 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2023 /* If the sink and it's root don't have the same number of channels, we need to remap */
2024 if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2025 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2026 update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2028 /* Now that the reference volume is updated, we can update the streams'
2029 * reference ratios. */
2030 compute_reference_ratios(root_sink);
2033 if (root_sink->set_volume) {
2034 /* If we have a function set_volume(), then we do not apply a
2035 * soft volume by default. However, set_volume() is free to
2036 * apply one to root_sink->soft_volume */
2038 pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
2039 if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2040 root_sink->set_volume(root_sink);
2043 /* If we have no function set_volume(), then the soft volume
2044 * becomes the real volume */
2045 root_sink->soft_volume = root_sink->real_volume;
2047 /* This tells the sink that soft volume and/or real volume changed */
2049 pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2052 /* Called from main thread */
/* Starts a software volume ramp on the sink: converts the requested ramp
 * to the sink's sample rate and notifies the IO thread.  Refused while a
 * passthrough input is connected. */
2053 void pa_sink_set_volume_ramp(
2055 const pa_cvolume_ramp *ramp,
2059 pa_sink_assert_ref(s);
2060 pa_assert_ctl_context();
2061 pa_assert(PA_SINK_IS_LINKED(s->state));
2064 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2065 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2066 if (pa_sink_is_passthrough(s)) {
2067 pa_log_warn("Cannot do volume ramp, Sink is connected to PASSTHROUGH input");
/* Express the ramp length in samples at the sink's rate. */
2071 pa_cvolume_ramp_convert(ramp, &s->ramp, s->sample_spec.rate);
2073 /* This tells the sink that volume ramp changed */
2075 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_RAMP, NULL, 0, NULL) == 0);
2078 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2079 * Only to be called by sink implementor */
/* Sets s->soft_volume (NULL volume resets it to NORM) and, for
 * non-deferred-volume sinks, pushes the new value to the IO thread.
 * Deferred-volume sinks copy it into thread_info directly. */
2080 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2082 pa_sink_assert_ref(s);
2083 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* Context depends on the volume mode: IO thread for deferred volume,
 * control thread otherwise. */
2085 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2086 pa_sink_assert_io_context(s);
2088 pa_assert_ctl_context();
2091 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2093 s->soft_volume = *volume;
2095 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2096 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2098 s->thread_info.soft_volume = s->soft_volume;
2101 /* Called from the main thread. Only called for the root sink in volume sharing
2102 * cases, except for internal recursive calls. */
/* Reacts to an externally caused hardware volume change: adopts
 * s->real_volume as the new reference volume and rebuilds the stream
 * volumes from their (unchanged) ratios. */
2103 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2107 pa_sink_assert_ref(s);
2108 pa_assert(old_real_volume);
2109 pa_assert_ctl_context();
2110 pa_assert(PA_SINK_IS_LINKED(s->state));
2112 /* This is called when the hardware's real volume changes due to
2113 * some external event. We copy the real volume into our
2114 * reference volume and then rebuild the stream volumes based on
2115 * i->real_ratio which should stay fixed. */
2117 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* Nothing to do if the hardware volume did not actually move. */
2118 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2121 /* 1. Make the real volume the reference volume */
2122 update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
2125 if (pa_sink_flat_volume_enabled(s)) {
2127 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2128 pa_cvolume old_volume = i->volume;
2130 /* 2. Since the sink's reference and real volumes are equal
2131 * now our ratios should be too. */
2132 i->reference_ratio = i->real_ratio;
2134 /* 3. Recalculate the new stream reference volume based on the
2135 * reference ratio and the sink's reference volume.
2137 * This basically calculates:
2139 * i->volume = s->reference_volume * i->reference_ratio
2141 * This is identical to propagate_reference_volume() */
2142 i->volume = s->reference_volume;
2143 pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
2144 pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);
2146 /* Notify if something changed */
2147 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
2149 if (i->volume_changed)
2150 i->volume_changed(i);
2152 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
/* Recurse into any filter sinks sharing our volume. */
2155 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2156 propagate_real_volume(i->origin_sink, old_real_volume);
2160 /* Something got changed in the hardware. It probably makes sense
2161 * to save changed hw settings given that hw volume changes not
2162 * triggered by PA are almost certainly done by the user. */
2163 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2164 s->save_volume = TRUE;
2167 /* Called from io thread */
/* Asynchronously asks the main thread (via the outbound thread-mq) to
 * refresh the sink's volume and mute state. */
2168 void pa_sink_update_volume_and_mute(pa_sink *s) {
2170 pa_sink_assert_io_context(s);
2172 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2175 /* Called from main thread */
/* Returns the sink's reference volume, optionally re-reading the hardware
 * volume first (refresh_volume or force_refresh) and propagating any
 * externally caused change to the streams. */
2176 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
2177 pa_sink_assert_ref(s);
2178 pa_assert_ctl_context();
2179 pa_assert(PA_SINK_IS_LINKED(s->state));
2181 if (s->refresh_volume || force_refresh) {
2182 struct pa_cvolume old_real_volume;
2184 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2186 old_real_volume = s->real_volume;
/* Deferred-volume sinks query via message; others call get_volume()
 * directly (direct-call line elided in this listing). */
2188 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2191 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2193 update_real_volume(s, &s->real_volume, &s->channel_map);
2194 propagate_real_volume(s, &old_real_volume);
2197 return &s->reference_volume;
2200 /* Called from main thread. In volume sharing cases, only the root sink may
/* Entry point for sink implementors to report an externally caused volume
 * change: installs the new real volume and propagates it to all streams
 * and subscribers. */
2202 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2203 pa_cvolume old_real_volume;
2205 pa_sink_assert_ref(s);
2206 pa_assert_ctl_context();
2207 pa_assert(PA_SINK_IS_LINKED(s->state));
2208 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2210 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2212 old_real_volume = s->real_volume;
2213 update_real_volume(s, new_real_volume, &s->channel_map);
2214 propagate_real_volume(s, &old_real_volume);
2217 /* Called from main thread */
/* Sets the sink's mute state, forwards it to the hardware/IO thread and
 * posts a change event if the state actually flipped. */
2218 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
2219 pa_bool_t old_muted;
2221 pa_sink_assert_ref(s);
2222 pa_assert_ctl_context();
2223 pa_assert(PA_SINK_IS_LINKED(s->state));
2225 old_muted = s->muted;
/* NOTE(review): the 's->muted = mute;' assignment is elided between these
 * lines in this listing. */
2227 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
/* Non-deferred-volume sinks with a set_mute() callback apply it directly
 * (call line elided); otherwise the IO thread is told via message. */
2229 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute)
2232 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2234 if (old_muted != s->muted)
2235 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2238 /* Called from main thread */
/* Returns the sink's mute state, optionally re-reading it from the
 * hardware first.  A hardware-side change is saved and broadcast, and the
 * soft mute is re-synchronized. */
2239 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
2241 pa_sink_assert_ref(s);
2242 pa_assert_ctl_context();
2243 pa_assert(PA_SINK_IS_LINKED(s->state));
2245 if (s->refresh_muted || force_refresh) {
2246 pa_bool_t old_muted = s->muted;
/* Direct get_mute() for non-deferred sinks (call line elided); message
 * round-trip otherwise. */
2248 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_mute)
2251 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
2253 if (old_muted != s->muted) {
/* Externally changed mute is almost certainly user-initiated: persist it. */
2254 s->save_muted = TRUE;
2256 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2258 /* Make sure the soft mute status stays in sync */
2259 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2266 /* Called from main thread */
/* Entry point for sink implementors to report an externally caused mute
 * change; updates state, marks it for saving and notifies subscribers. */
2267 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
2268 pa_sink_assert_ref(s);
2269 pa_assert_ctl_context();
2270 pa_assert(PA_SINK_IS_LINKED(s->state));
2272 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
/* No-op if the state did not actually change. */
2274 if (s->muted == new_muted)
2277 s->muted = new_muted;
2278 s->save_muted = TRUE;
2280 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2283 /* Called from main thread */
/* Merges proplist 'p' into the sink's property list using the given update
 * mode and, if the sink is linked, fires the proplist-changed hook and a
 * subscription event. */
2284 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2285 pa_sink_assert_ref(s);
2286 pa_assert_ctl_context();
2289 pa_proplist_update(s->proplist, mode, p);
2291 if (PA_SINK_IS_LINKED(s->state)) {
2292 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2293 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2299 /* Called from main thread */
2300 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Updates PA_PROP_DEVICE_DESCRIPTION (NULL clears it), keeps the monitor
 * source's description in sync, and announces the change when linked. */
2301 void pa_sink_set_description(pa_sink *s, const char *description) {
2303 pa_sink_assert_ref(s);
2304 pa_assert_ctl_context();
/* Nothing to do if we're asked to clear an already-absent description
 * (early return not visible in this listing). */
2306 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2309 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
/* Setting the identical description again is a no-op. */
2311 if (old && description && pa_streq(old, description))
2315 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
/* Reached when description is NULL (branch structure partly missing here). */
2317 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2319 if (s->monitor_source) {
/* Derive the monitor source's description from ours. */
2322 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2323 pa_source_set_description(s->monitor_source, n);
2327 if (PA_SINK_IS_LINKED(s->state)) {
2328 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2329 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2333 /* Called from main thread */
/* Returns the number of streams attached to this sink, including streams
 * connected to its monitor source (contrast with pa_sink_used_by()). */
2334 unsigned pa_sink_linked_by(pa_sink *s) {
2337 pa_sink_assert_ref(s);
2338 pa_assert_ctl_context();
2339 pa_assert(PA_SINK_IS_LINKED(s->state));
2341 ret = pa_idxset_size(s->inputs);
2343 /* We add in the number of streams connected to us here. Please
2344 * note the asymmetry to pa_sink_used_by()! */
2346 if (s->monitor_source)
2347 ret += pa_source_linked_by(s->monitor_source);
2352 /* Called from main thread */
/* Returns the number of actively playing (non-corked) inputs of this sink.
 * Monitor-source streams are deliberately NOT counted. */
2353 unsigned pa_sink_used_by(pa_sink *s) {
2356 pa_sink_assert_ref(s);
2357 pa_assert_ctl_context();
2358 pa_assert(PA_SINK_IS_LINKED(s->state));
2360 ret = pa_idxset_size(s->inputs);
/* Corked inputs are a subset of all inputs, so this must hold. */
2361 pa_assert(ret >= s->n_corked);
2363 /* Streams connected to our monitor source do not matter for
2364 * pa_sink_used_by()!.*/
2366 return ret - s->n_corked;
2369 /* Called from main thread */
/* Returns the number of streams (sink inputs plus monitor-source outputs)
 * that should inhibit auto-suspending this sink. Corked, unlinked and
 * DONT_INHIBIT_AUTO_SUSPEND inputs are excluded from the count. */
2370 unsigned pa_sink_check_suspend(pa_sink *s) {
2375 pa_sink_assert_ref(s);
2376 pa_assert_ctl_context();
/* An unlinked sink inhibits nothing (early return not visible here). */
2378 if (!PA_SINK_IS_LINKED(s->state))
2383 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2384 pa_sink_input_state_t st;
2386 st = pa_sink_input_get_state(i);
2388 /* We do not assert here. It is perfectly valid for a sink input to
2389 * be in the INIT state (i.e. created, marked done but not yet put)
2390 * and we should not care if it's unlinked as it won't contribute
2391 * towards our busy status.
/* Skip inputs that are not (yet) linked. */
2393 if (!PA_SINK_INPUT_IS_LINKED(st))
/* Corked inputs don't keep the sink busy. */
2396 if (st == PA_SINK_INPUT_CORKED)
/* Inputs that explicitly opted out of inhibiting auto-suspend. */
2399 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
/* Streams recording from our monitor also keep us awake. */
2405 if (s->monitor_source)
2406 ret += pa_source_check_suspend(s->monitor_source);
2411 /* Called from the IO thread */
/* Copies each input's main-thread soft_volume into its thread_info shadow
 * copy, requesting a rewind for every input whose volume actually changed so
 * already-rendered audio is re-rendered at the new volume. */
2412 static void sync_input_volumes_within_thread(pa_sink *s) {
2416 pa_sink_assert_ref(s);
2417 pa_sink_assert_io_context(s);
2419 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
/* Skip inputs whose IO-thread copy is already up to date. */
2420 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2423 i->thread_info.soft_volume = i->soft_volume;
2424 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
2428 /* Called from the IO thread. Only called for the root sink in volume sharing
2429 * cases, except for internal recursive calls. */
/* Applies the synced volume on this sink, then recurses into every
 * filter sink (origin sink of an input) that shares volume with its master,
 * so the whole volume-sharing tree picks up the new value. */
2430 static void set_shared_volume_within_thread(pa_sink *s) {
2431 pa_sink_input *i = NULL;
2434 pa_sink_assert_ref(s);
/* Deliver SET_VOLUME_SYNCED directly (we are already in the IO thread). */
2436 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2438 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2439 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2440 set_shared_volume_within_thread(i->origin_sink);
2444 /* Called from IO thread, except when it is not */
/* Central message handler for the sink's asyncmsgq: attaches/detaches sink
 * inputs, handles stream moves, volume/mute synchronization between main and
 * IO thread, latency queries/settings, and state changes. Most messages run
 * in the IO thread; UPDATE_VOLUME_AND_MUTE is explicitly main-thread.
 * NOTE(review): this listing is missing lines (returns, closing braces and
 * some statements) — comments below describe only what is visible. */
2445 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2446 pa_sink *s = PA_SINK(o);
2447 pa_sink_assert_ref(s);
2449 switch ((pa_sink_message_t) code) {
2451 case PA_SINK_MESSAGE_ADD_INPUT: {
2452 pa_sink_input *i = PA_SINK_INPUT(userdata);
2454 /* If you change anything here, make sure to change the
2455 * sink input handling a few lines down at
2456 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
/* Take a reference for the IO thread's input table. */
2458 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2460 /* Since the caller sleeps in pa_sink_input_put(), we can
2461 * safely access data outside of thread_info even though
/* Mirror the sync_prev/sync_next links into thread_info. */
2464 if ((i->thread_info.sync_prev = i->sync_prev)) {
2465 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2466 pa_assert(i->sync_prev->sync_next == i);
2467 i->thread_info.sync_prev->thread_info.sync_next = i;
2470 if ((i->thread_info.sync_next = i->sync_next)) {
2471 pa_assert(i->sink == i->thread_info.sync_next->sink);
2472 pa_assert(i->sync_next->sync_prev == i);
2473 i->thread_info.sync_next->thread_info.sync_prev = i;
2476 pa_assert(!i->thread_info.attached);
2477 i->thread_info.attached = TRUE;
2482 pa_sink_input_set_state_within_thread(i, i->state);
2484 /* The requested latency of the sink input needs to be
2485 * fixed up and then configured on the sink */
2487 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2488 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2490 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2491 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2493 /* We don't rewind here automatically. This is left to the
2494 * sink input implementor because some sink inputs need a
2495 * slow start, i.e. need some time to buffer client
2496 * samples before beginning streaming. */
2498 /* FIXME: Actually rewinding should be requested before
2499 * updating the sink requested latency, because updating
2500 * the requested latency updates also max_rewind of the
2501 * sink. Now consider this: a sink has a 10 s buffer and
2502 * nobody has requested anything less. Then a new stream
2503 * appears while the sink buffer is full. The new stream
2504 * requests e.g. 100 ms latency. That request is forwarded
2505 * to the sink, so now max_rewind is 100 ms. When a rewind
2506 * is requested, the sink will only rewind 100 ms, and the
2507 * new stream will have to wait about 10 seconds before it
2508 * becomes audible. */
2510 /* In flat volume mode we need to update the volume as
/* Delegate the volume update by re-dispatching to ourselves. */
2512 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2515 case PA_SINK_MESSAGE_REMOVE_INPUT: {
2516 pa_sink_input *i = PA_SINK_INPUT(userdata);
2518 /* If you change anything here, make sure to change the
2519 * sink input handling a few lines down at
2520 * PA_SINK_MESSAGE_START_MOVE, too. */
2525 pa_sink_input_set_state_within_thread(i, i->state);
2527 pa_assert(i->thread_info.attached);
2528 i->thread_info.attached = FALSE;
2530 /* Since the caller sleeps in pa_sink_input_unlink(),
2531 * we can safely access data outside of thread_info even
2532 * though it is mutable */
2534 pa_assert(!i->sync_prev);
2535 pa_assert(!i->sync_next);
/* Unsplice this input from the IO-thread-side sync chain. */
2537 if (i->thread_info.sync_prev) {
2538 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2539 i->thread_info.sync_prev = NULL;
2542 if (i->thread_info.sync_next) {
2543 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2544 i->thread_info.sync_next = NULL;
/* Drop the reference held by the IO thread's input table. */
2547 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
2548 pa_sink_input_unref(i);
2550 pa_sink_invalidate_requested_latency(s, TRUE);
2551 pa_sink_request_rewind(s, (size_t) -1);
2553 /* In flat volume mode we need to update the volume as
2555 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2558 case PA_SINK_MESSAGE_START_MOVE: {
2559 pa_sink_input *i = PA_SINK_INPUT(userdata);
2561 /* We don't support moving synchronized streams. */
2562 pa_assert(!i->sync_prev);
2563 pa_assert(!i->sync_next);
2564 pa_assert(!i->thread_info.sync_next);
2565 pa_assert(!i->thread_info.sync_prev);
2567 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2569 size_t sink_nbytes, total_nbytes;
2571 /* The old sink probably has some audio from this
2572 * stream in its buffer. We want to "take it back" as
2573 * much as possible and play it to the new sink. We
2574 * don't know at this point how much the old sink can
2575 * rewind. We have to pick something, and that
2576 * something is the full latency of the old sink here.
2577 * So we rewind the stream buffer by the sink latency
2578 * amount, which may be more than what we should
2579 * rewind. This can result in a chunk of audio being
2580 * played both to the old sink and the new sink.
2582 * FIXME: Fix this code so that we don't have to make
2583 * guesses about how much the sink will actually be
2584 * able to rewind. If someone comes up with a solution
2585 * for this, something to note is that the part of the
2586 * latency that the old sink couldn't rewind should
2587 * ideally be compensated after the stream has moved
2588 * to the new sink by adding silence. The new sink
2589 * most likely can't start playing the moved stream
2590 * immediately, and that gap should be removed from
2591 * the "compensation silence" (at least at the time of
2592 * writing this, the move finish code will actually
2593 * already take care of dropping the new sink's
2594 * unrewindable latency, so taking into account the
2595 * unrewindable latency of the old sink is the only
2598 * The render_memblockq contents are discarded,
2599 * because when the sink changes, the format of the
2600 * audio stored in the render_memblockq may change
2601 * too, making the stored audio invalid. FIXME:
2602 * However, the read and write indices are moved back
2603 * the same amount, so if they are not the same now,
2604 * they won't be the same after the rewind either. If
2605 * the write index of the render_memblockq is ahead of
2606 * the read index, then the render_memblockq will feed
2607 * the new sink some silence first, which it shouldn't
2608 * do. The write index should be flushed to be the
2609 * same as the read index. */
2611 /* Get the latency of the sink */
2612 usec = pa_sink_get_latency_within_thread(s);
2613 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2614 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
2616 if (total_nbytes > 0) {
/* Convert sink-side bytes back to stream-side bytes when resampling. */
2617 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
2618 i->thread_info.rewrite_flush = TRUE;
2619 pa_sink_input_process_rewind(i, sink_nbytes);
2626 pa_assert(i->thread_info.attached);
2627 i->thread_info.attached = FALSE;
2629 /* Let's remove the sink input ...*/
2630 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
2631 pa_sink_input_unref(i);
2633 pa_sink_invalidate_requested_latency(s, TRUE);
2635 pa_log_debug("Requesting rewind due to started move");
2636 pa_sink_request_rewind(s, (size_t) -1);
2638 /* In flat volume mode we need to update the volume as
2640 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2643 case PA_SINK_MESSAGE_FINISH_MOVE: {
2644 pa_sink_input *i = PA_SINK_INPUT(userdata);
2646 /* We don't support moving synchronized streams. */
2647 pa_assert(!i->sync_prev);
2648 pa_assert(!i->sync_next);
2649 pa_assert(!i->thread_info.sync_next);
2650 pa_assert(!i->thread_info.sync_prev);
2652 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2654 pa_assert(!i->thread_info.attached);
2655 i->thread_info.attached = TRUE;
2660 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2664 /* In the ideal case the new sink would start playing
2665 * the stream immediately. That requires the sink to
2666 * be able to rewind all of its latency, which usually
2667 * isn't possible, so there will probably be some gap
2668 * before the moved stream becomes audible. We then
2669 * have two possibilities: 1) start playing the stream
2670 * from where it is now, or 2) drop the unrewindable
2671 * latency of the sink from the stream. With option 1
2672 * we won't lose any audio but the stream will have a
2673 * pause. With option 2 we may lose some audio but the
2674 * stream time will be somewhat in sync with the wall
2675 * clock. Lennart seems to have chosen option 2 (one
2676 * of the reasons might have been that option 1 is
2677 * actually much harder to implement), so we drop the
2678 * latency of the new sink from the moved stream and
2679 * hope that the sink will undo most of that in the
2682 /* Get the latency of the sink */
2683 usec = pa_sink_get_latency_within_thread(s);
2684 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2687 pa_sink_input_drop(i, nbytes);
2689 pa_log_debug("Requesting rewind due to finished move");
2690 pa_sink_request_rewind(s, nbytes);
2693 /* Updating the requested sink latency has to be done
2694 * after the sink rewind request, not before, because
2695 * otherwise the sink may limit the rewind amount
2698 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2699 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2701 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2702 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2704 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2707 case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
/* Resolve to the root of the volume-sharing tree and sync from there. */
2708 pa_sink *root_sink = pa_sink_get_master(s);
2710 if (PA_LIKELY(root_sink))
2711 set_shared_volume_within_thread(root_sink);
/* Fall-through behavior of the following volume cases is intentional. */
2716 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2718 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
/* Queue the HW volume change for timed application by the IO thread. */
2720 pa_sink_volume_change_push(s);
2722 /* Fall through ... */
2724 case PA_SINK_MESSAGE_SET_VOLUME:
2726 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2727 s->thread_info.soft_volume = s->soft_volume;
2728 pa_sink_request_rewind(s, (size_t) -1);
2730 /* Fall through ... */
2732 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2733 sync_input_volumes_within_thread(s);
2736 case PA_SINK_MESSAGE_SET_VOLUME_RAMP:
2737 /* if we have ongoing ramp where we take current start values */
2738 pa_cvolume_ramp_start_from(&s->thread_info.ramp, &s->ramp);
2739 s->thread_info.ramp = s->ramp;
2742 case PA_SINK_MESSAGE_GET_VOLUME:
2744 if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
/* Apply any pending deferred volume changes before reading back. */
2746 pa_sink_volume_change_flush(s);
2747 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2750 /* In case sink implementor reset SW volume. */
2751 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2752 s->thread_info.soft_volume = s->soft_volume;
2753 pa_sink_request_rewind(s, (size_t) -1);
2758 case PA_SINK_MESSAGE_SET_MUTE:
2760 if (s->thread_info.soft_muted != s->muted) {
2761 s->thread_info.soft_muted = s->muted;
2762 pa_sink_request_rewind(s, (size_t) -1);
2765 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2770 case PA_SINK_MESSAGE_GET_MUTE:
2772 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2777 case PA_SINK_MESSAGE_SET_STATE: {
/* True iff this transition crosses into or out of SUSPENDED. */
2779 pa_bool_t suspend_change =
2780 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2781 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2783 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2785 if (s->thread_info.state == PA_SINK_SUSPENDED) {
/* Drop any pending rewind; it is meaningless while suspended. */
2786 s->thread_info.rewind_nbytes = 0;
2787 s->thread_info.rewind_requested = FALSE;
2790 if (suspend_change) {
/* Let each input react to the suspend/resume transition. */
2794 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2795 if (i->suspend_within_thread)
2796 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2802 case PA_SINK_MESSAGE_DETACH:
2804 /* Detach all streams */
2805 pa_sink_detach_within_thread(s);
2808 case PA_SINK_MESSAGE_ATTACH:
2810 /* Reattach all streams */
2811 pa_sink_attach_within_thread(s);
2814 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2816 pa_usec_t *usec = userdata;
2817 *usec = pa_sink_get_requested_latency_within_thread(s);
2819 /* Yes, that's right, the IO thread will see -1 when no
2820 * explicit requested latency is configured, the main
2821 * thread will see max_latency */
2822 if (*usec == (pa_usec_t) -1)
2823 *usec = s->thread_info.max_latency;
2828 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
/* userdata points at a two-element {min, max} array. */
2829 pa_usec_t *r = userdata;
2831 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2836 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2837 pa_usec_t *r = userdata;
2839 r[0] = s->thread_info.min_latency;
2840 r[1] = s->thread_info.max_latency;
2845 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2847 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2850 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2852 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2855 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2857 *((size_t*) userdata) = s->thread_info.max_rewind;
2860 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2862 *((size_t*) userdata) = s->thread_info.max_request;
2865 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2867 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2870 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2872 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2875 case PA_SINK_MESSAGE_SET_PORT:
2877 pa_assert(userdata);
/* Deferred-volume port switch: execute set_port() in the IO thread and
 * hand the result back through the message payload. */
2879 struct sink_message_set_port *msg_data = userdata;
2880 msg_data->ret = s->set_port(s, msg_data->port);
2884 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2885 /* This message is sent from IO-thread and handled in main thread. */
2886 pa_assert_ctl_context();
2888 /* Make sure we're not messing with main thread when no longer linked */
2889 if (!PA_SINK_IS_LINKED(s->state))
/* Force-refresh both volume and mute from the driver. */
2892 pa_sink_get_volume(s, TRUE);
2893 pa_sink_get_mute(s, TRUE);
2896 case PA_SINK_MESSAGE_GET_LATENCY:
2897 case PA_SINK_MESSAGE_MAX:
2904 /* Called from main thread */
/* Suspends or resumes every sink of the core with the given cause.
 * A nonzero cause is required; failures from individual sinks are collected
 * (error accumulation/return not fully visible in this listing). */
2905 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2910 pa_core_assert_ref(c);
2911 pa_assert_ctl_context();
2912 pa_assert(cause != 0);
2914 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2917 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2924 /* Called from main thread */
/* Synchronously asks the IO thread to detach all streams from the sink. */
2925 void pa_sink_detach(pa_sink *s) {
2926 pa_sink_assert_ref(s);
2927 pa_assert_ctl_context();
2928 pa_assert(PA_SINK_IS_LINKED(s->state));
2930 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2933 /* Called from main thread */
/* Synchronously asks the IO thread to reattach all streams to the sink. */
2934 void pa_sink_attach(pa_sink *s) {
2935 pa_sink_assert_ref(s);
2936 pa_assert_ctl_context();
2937 pa_assert(PA_SINK_IS_LINKED(s->state));
2939 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2942 /* Called from IO thread */
/* Detaches every sink input (per-input detach call not visible in this
 * listing) and then detaches the monitor source, if any. */
2943 void pa_sink_detach_within_thread(pa_sink *s) {
2947 pa_sink_assert_ref(s);
2948 pa_sink_assert_io_context(s);
2949 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2951 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2955 if (s->monitor_source)
2956 pa_source_detach_within_thread(s->monitor_source);
2959 /* Called from IO thread */
/* Reattaches every sink input (per-input attach call not visible in this
 * listing) and then reattaches the monitor source, if any. */
2960 void pa_sink_attach_within_thread(pa_sink *s) {
2964 pa_sink_assert_ref(s);
2965 pa_sink_assert_io_context(s);
2966 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2968 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2972 if (s->monitor_source)
2973 pa_source_attach_within_thread(s->monitor_source);
2976 /* Called from IO thread */
/* Requests that the sink rewinds its playback buffer by 'nbytes'
 * ((size_t) -1 means "as much as possible"). The request is clamped to
 * max_rewind, coalesced with an already-pending larger request, and finally
 * forwarded to the implementor via the request_rewind() callback. */
2977 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2978 pa_sink_assert_ref(s);
2979 pa_sink_assert_io_context(s);
2980 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
/* Rewinding a suspended sink is pointless (early return not visible). */
2982 if (s->thread_info.state == PA_SINK_SUSPENDED)
2985 if (nbytes == (size_t) -1)
2986 nbytes = s->thread_info.max_rewind;
2988 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
/* A pending request that already covers this amount wins. */
2990 if (s->thread_info.rewind_requested &&
2991 nbytes <= s->thread_info.rewind_nbytes)
2994 s->thread_info.rewind_nbytes = nbytes;
2995 s->thread_info.rewind_requested = TRUE;
2997 if (s->request_rewind)
2998 s->request_rewind(s);
3001 /* Called from IO thread */
/* Computes the effective requested latency: the minimum of all inputs'
 * requests and the monitor source's request, clamped to [min,max]_latency.
 * Returns (pa_usec_t) -1 if nobody requested anything. For fixed-latency
 * sinks the clamped fixed latency is returned directly. The result is
 * cached while the sink is linked. */
3002 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
3003 pa_usec_t result = (pa_usec_t) -1;
3006 pa_usec_t monitor_latency;
3008 pa_sink_assert_ref(s);
3009 pa_sink_assert_io_context(s);
3011 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3012 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
/* Serve from cache when valid. */
3014 if (s->thread_info.requested_latency_valid)
3015 return s->thread_info.requested_latency;
/* Take the smallest latency requested by any input. */
3017 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3018 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3019 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3020 result = i->thread_info.requested_sink_latency;
3022 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3024 if (monitor_latency != (pa_usec_t) -1 &&
3025 (result == (pa_usec_t) -1 || result > monitor_latency))
3026 result = monitor_latency;
3028 if (result != (pa_usec_t) -1)
3029 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3031 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3032 /* Only cache if properly initialized */
3033 s->thread_info.requested_latency = result;
3034 s->thread_info.requested_latency_valid = TRUE;
3040 /* Called from main thread */
/* Main-thread accessor for the requested latency; queries the IO thread.
 * Suspended sinks are handled specially (branch body not visible here). */
3041 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3044 pa_sink_assert_ref(s);
3045 pa_assert_ctl_context();
3046 pa_assert(PA_SINK_IS_LINKED(s->state));
3048 if (s->state == PA_SINK_SUSPENDED)
3051 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3056 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Updates max_rewind, propagates the new value to every attached input and
 * to the monitor source. No-op if the value is unchanged. */
3057 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3061 pa_sink_assert_ref(s);
3062 pa_sink_assert_io_context(s);
3064 if (max_rewind == s->thread_info.max_rewind)
3067 s->thread_info.max_rewind = max_rewind;
3069 if (PA_SINK_IS_LINKED(s->thread_info.state))
3070 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3071 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3073 if (s->monitor_source)
3074 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3077 /* Called from main thread */
/* Sets max_rewind: via message to the IO thread when linked, directly
 * otherwise (the IO thread has not started up yet in that case). */
3078 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3079 pa_sink_assert_ref(s);
3080 pa_assert_ctl_context();
3082 if (PA_SINK_IS_LINKED(s->state))
3083 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3085 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3088 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Updates max_request and propagates the new value to every attached input.
 * No-op if the value is unchanged. */
3089 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3092 pa_sink_assert_ref(s);
3093 pa_sink_assert_io_context(s);
3095 if (max_request == s->thread_info.max_request)
3098 s->thread_info.max_request = max_request;
3100 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3103 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3104 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3108 /* Called from main thread */
/* Sets max_request: via message to the IO thread when linked, directly
 * otherwise (the IO thread has not started up yet in that case). */
3109 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3110 pa_sink_assert_ref(s);
3111 pa_assert_ctl_context();
3113 if (PA_SINK_IS_LINKED(s->state))
3114 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3116 pa_sink_set_max_request_within_thread(s, max_request);
3119 /* Called from IO thread */
/* Invalidates the cached requested latency (only meaningful for
 * dynamic-latency sinks) and notifies the sink implementor and all inputs
 * so they can recompute. The 'dynamic' parameter's gating of this logic is
 * not fully visible in this listing — confirm against the full source. */
3120 void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
3124 pa_sink_assert_ref(s);
3125 pa_sink_assert_io_context(s);
3127 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3128 s->thread_info.requested_latency_valid = FALSE;
3132 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3134 if (s->update_requested_latency)
3135 s->update_requested_latency(s);
3137 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3138 if (i->update_sink_requested_latency)
3139 i->update_sink_requested_latency(i);
3143 /* Called from main thread */
/* Sets the dynamic latency range, clamping to the absolute limits.
 * When linked the range travels via message to the IO thread; otherwise
 * it is applied directly. */
3144 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3145 pa_sink_assert_ref(s);
3146 pa_assert_ctl_context();
3148 /* min_latency == 0: no limit
3149 * min_latency anything else: specified limit
3151 * Similar for max_latency */
3153 if (min_latency < ABSOLUTE_MIN_LATENCY)
3154 min_latency = ABSOLUTE_MIN_LATENCY;
3156 if (max_latency <= 0 ||
3157 max_latency > ABSOLUTE_MAX_LATENCY)
3158 max_latency = ABSOLUTE_MAX_LATENCY;
3160 pa_assert(min_latency <= max_latency);
3162 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3163 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3164 max_latency == ABSOLUTE_MAX_LATENCY) ||
3165 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3167 if (PA_SINK_IS_LINKED(s->state)) {
/* 'r' presumably holds {min, max} — its declaration is missing from this
 * listing; verify against the full source. */
3173 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3175 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3178 /* Called from main thread */
/* Reads the latency range: from the IO thread via message when linked,
 * from the (not-yet-started) thread_info directly otherwise. */
3179 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3180 pa_sink_assert_ref(s);
3181 pa_assert_ctl_context();
3182 pa_assert(min_latency);
3183 pa_assert(max_latency);
3185 if (PA_SINK_IS_LINKED(s->state)) {
3186 pa_usec_t r[2] = { 0, 0 };
3188 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3190 *min_latency = r[0];
3191 *max_latency = r[1];
/* Unlinked: IO thread hasn't started, so thread_info is safe to read. */
3193 *min_latency = s->thread_info.min_latency;
3194 *max_latency = s->thread_info.max_latency;
3198 /* Called from IO thread */
/* Applies a new latency range in the IO thread, notifies every input that
 * cares, invalidates the cached requested latency, and mirrors the range
 * onto the monitor source. No-op when the range is unchanged. */
3199 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3200 pa_sink_assert_ref(s);
3201 pa_sink_assert_io_context(s);
3203 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3204 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3205 pa_assert(min_latency <= max_latency);
3207 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3208 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3209 max_latency == ABSOLUTE_MAX_LATENCY) ||
3210 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3212 if (s->thread_info.min_latency == min_latency &&
3213 s->thread_info.max_latency == max_latency)
3216 s->thread_info.min_latency = min_latency;
3217 s->thread_info.max_latency = max_latency;
3219 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3223 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3224 if (i->update_sink_latency_range)
3225 i->update_sink_latency_range(i);
/* Cached requested latency may now be out of range. */
3228 pa_sink_invalidate_requested_latency(s, FALSE);
3230 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3233 /* Called from main thread */
/* Sets the fixed latency of a non-dynamic-latency sink, clamped to the
 * absolute limits, forwarding it to the IO thread when linked and to the
 * monitor source. Dynamic-latency sinks must pass 0. */
3234 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3235 pa_sink_assert_ref(s);
3236 pa_assert_ctl_context();
3238 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
/* Fixed latency is meaningless with dynamic latency. */
3239 pa_assert(latency == 0);
3243 if (latency < ABSOLUTE_MIN_LATENCY)
3244 latency = ABSOLUTE_MIN_LATENCY;
3246 if (latency > ABSOLUTE_MAX_LATENCY)
3247 latency = ABSOLUTE_MAX_LATENCY;
3249 if (PA_SINK_IS_LINKED(s->state))
3250 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3252 s->thread_info.fixed_latency = latency;
3254 pa_source_set_fixed_latency(s->monitor_source, latency);
3257 /* Called from main thread */
/* Reads the fixed latency: from the IO thread when linked, directly
 * otherwise. Returns early for dynamic-latency sinks (value not visible
 * in this listing — presumably 0; confirm against the full source). */
3258 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3261 pa_sink_assert_ref(s);
3262 pa_assert_ctl_context();
3264 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3267 if (PA_SINK_IS_LINKED(s->state))
3268 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3270 latency = s->thread_info.fixed_latency;
3275 /* Called from IO thread */
/* Applies a new fixed latency in the IO thread, notifies interested inputs,
 * invalidates the requested-latency cache and mirrors the value onto the
 * monitor source. No-op if unchanged; dynamic-latency sinks must pass 0. */
3276 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3277 pa_sink_assert_ref(s);
3278 pa_sink_assert_io_context(s);
3280 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3281 pa_assert(latency == 0);
3285 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3286 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3288 if (s->thread_info.fixed_latency == latency)
3291 s->thread_info.fixed_latency = latency;
3293 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3297 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3298 if (i->update_sink_fixed_latency)
3299 i->update_sink_fixed_latency(i);
3302 pa_sink_invalidate_requested_latency(s, FALSE);
3304 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3307 /* Called from main context */
/* Reads max_rewind: from thread_info directly when the sink is not linked
 * (IO thread not running), otherwise via a synchronous message. */
3308 size_t pa_sink_get_max_rewind(pa_sink *s) {
3310 pa_assert_ctl_context();
3311 pa_sink_assert_ref(s);
3313 if (!PA_SINK_IS_LINKED(s->state))
3314 return s->thread_info.max_rewind;
3316 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3321 /* Called from main context */
/* Reads max_request: from thread_info directly when the sink is not linked
 * (IO thread not running), otherwise via a synchronous message. */
3322 size_t pa_sink_get_max_request(pa_sink *s) {
3324 pa_sink_assert_ref(s);
3325 pa_assert_ctl_context();
3327 if (!PA_SINK_IS_LINKED(s->state))
3328 return s->thread_info.max_request;
3330 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3335 /* Called from main context */
/* Switches the sink to the named port. Returns a negative PA_ERR_* code on
 * failure. For deferred-volume sinks the implementor's set_port() runs in
 * the IO thread via a message; otherwise it is called directly. On success
 * the active port is recorded, subscribers are notified and the
 * PORT_CHANGED hook fires. */
3336 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
3337 pa_device_port *port;
3340 pa_sink_assert_ref(s);
3341 pa_assert_ctl_context();
/* Sink without a set_port() callback cannot switch ports (the guarding
 * condition is not visible in this listing — confirm). */
3344 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3345 return -PA_ERR_NOTIMPLEMENTED;
3348 if (!s->ports || !name)
3349 return -PA_ERR_NOENTITY;
3351 if (!(port = pa_hashmap_get(s->ports, name)))
3352 return -PA_ERR_NOENTITY;
/* Re-selecting the active port only updates the save flag. */
3354 if (s->active_port == port) {
3355 s->save_port = s->save_port || save;
3359 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
/* Run set_port() in the IO thread; result comes back in msg.ret. */
3360 struct sink_message_set_port msg = { .port = port, .ret = 0 };
3361 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
3365 ret = s->set_port(s, port);
3368 return -PA_ERR_NOENTITY;
3370 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3372 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3374 s->active_port = port;
3375 s->save_port = save;
3377 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
/* Fills in PA_PROP_DEVICE_ICON_NAME from form factor, device class, profile
 * and bus properties, following (mostly) the freedesktop icon naming spec.
 * Does nothing if an icon name is already set. The composed name is
 * "<type><profile-suffix>[-<bus>]". */
3382 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
3383 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
/* Respect an explicitly configured icon. */
3387 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3390 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3392 if (pa_streq(ff, "microphone"))
3393 t = "audio-input-microphone";
3394 else if (pa_streq(ff, "webcam"))
3396 else if (pa_streq(ff, "computer"))
3398 else if (pa_streq(ff, "handset"))
3400 else if (pa_streq(ff, "portable"))
3401 t = "multimedia-player";
3402 else if (pa_streq(ff, "tv"))
3403 t = "video-display";
3406 * The following icons are not part of the icon naming spec,
3407 * because Rodney Dawes sucks as the maintainer of that spec.
3409 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3411 else if (pa_streq(ff, "headset"))
3412 t = "audio-headset";
3413 else if (pa_streq(ff, "headphone"))
3414 t = "audio-headphones";
3415 else if (pa_streq(ff, "speaker"))
3416 t = "audio-speakers";
3417 else if (pa_streq(ff, "hands-free"))
3418 t = "audio-handsfree";
3422 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3423 if (pa_streq(c, "modem"))
/* Fallback when no form factor matched (surrounding defaults for sink vs.
 * source are not visible in this listing). */
3430 t = "audio-input-microphone";
3433 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
/* Choose an "-analog"/"-iec958"/"-hdmi" style suffix based on the profile
 * (suffix assignments not visible in this listing). */
3434 if (strstr(profile, "analog"))
3436 else if (strstr(profile, "iec958"))
3438 else if (strstr(profile, "hdmi"))
3442 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3444 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3449 pa_bool_t pa_device_init_description(pa_proplist *p) {
3450 const char *s, *d = NULL, *k;
3453 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3456 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3457 if (pa_streq(s, "internal"))
3458 d = _("Built-in Audio");
3461 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3462 if (pa_streq(s, "modem"))
3466 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3471 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3474 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3476 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3481 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
3485 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3488 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3489 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3490 || pa_streq(s, "headset")) {
3491 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3498 unsigned pa_device_init_priority(pa_proplist *p) {
3500 unsigned priority = 0;
3504 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3506 if (pa_streq(s, "sound"))
3508 else if (!pa_streq(s, "modem"))
3512 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3514 if (pa_streq(s, "internal"))
3516 else if (pa_streq(s, "speaker"))
3518 else if (pa_streq(s, "headphone"))
3522 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3524 if (pa_streq(s, "pci"))
3526 else if (pa_streq(s, "usb"))
3528 else if (pa_streq(s, "bluetooth"))
3532 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3534 if (pa_startswith(s, "analog-"))
3536 else if (pa_startswith(s, "iec958-"))
3543 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3545 /* Called from the IO thread. */
3546 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3547 pa_sink_volume_change *c;
3548 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3549 c = pa_xnew(pa_sink_volume_change, 1);
3551 PA_LLIST_INIT(pa_sink_volume_change, c);
3553 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3557 /* Called from the IO thread. */
3558 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3560 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3564 /* Called from the IO thread. */
3565 void pa_sink_volume_change_push(pa_sink *s) {
3566 pa_sink_volume_change *c = NULL;
3567 pa_sink_volume_change *nc = NULL;
3568 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3570 const char *direction = NULL;
3573 nc = pa_sink_volume_change_new(s);
3575 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3576 * Adding one more volume for HW would get us rid of this, but I am trying
3577 * to survive with the ones we already have. */
3578 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
3580 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3581 pa_log_debug("Volume not changing");
3582 pa_sink_volume_change_free(nc);
3586 nc->at = pa_sink_get_latency_within_thread(s);
3587 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3589 if (s->thread_info.volume_changes_tail) {
3590 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3591 /* If volume is going up let's do it a bit late. If it is going
3592 * down let's do it a bit early. */
3593 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3594 if (nc->at + safety_margin > c->at) {
3595 nc->at += safety_margin;
3600 else if (nc->at - safety_margin > c->at) {
3601 nc->at -= safety_margin;
3609 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3610 nc->at += safety_margin;
3613 nc->at -= safety_margin;
3616 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3619 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3622 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3624 /* We can ignore volume events that came earlier but should happen later than this. */
3625 PA_LLIST_FOREACH(c, nc->next) {
3626 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3627 pa_sink_volume_change_free(c);
3630 s->thread_info.volume_changes_tail = nc;
3633 /* Called from the IO thread. */
3634 static void pa_sink_volume_change_flush(pa_sink *s) {
3635 pa_sink_volume_change *c = s->thread_info.volume_changes;
3637 s->thread_info.volume_changes = NULL;
3638 s->thread_info.volume_changes_tail = NULL;
3640 pa_sink_volume_change *next = c->next;
3641 pa_sink_volume_change_free(c);
3646 /* Called from the IO thread. */
3647 pa_bool_t pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3649 pa_bool_t ret = FALSE;
3653 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3659 pa_assert(s->write_volume);
3661 now = pa_rtclock_now();
3663 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3664 pa_sink_volume_change *c = s->thread_info.volume_changes;
3665 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3666 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3667 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3669 s->thread_info.current_hw_volume = c->hw_volume;
3670 pa_sink_volume_change_free(c);
3676 if (s->thread_info.volume_changes) {
3678 *usec_to_next = s->thread_info.volume_changes->at - now;
3679 if (pa_log_ratelimit(PA_LOG_DEBUG))
3680 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3685 s->thread_info.volume_changes_tail = NULL;
3690 /* Called from the IO thread. */
3691 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3692 /* All the queued volume events later than current latency are shifted to happen earlier. */
3693 pa_sink_volume_change *c;
3694 pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3695 pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3696 pa_usec_t limit = pa_sink_get_latency_within_thread(s);
3698 pa_log_debug("latency = %lld", (long long) limit);
3699 limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3701 PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3702 pa_usec_t modified_limit = limit;
3703 if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3704 modified_limit -= s->thread_info.volume_change_safety_margin;
3706 modified_limit += s->thread_info.volume_change_safety_margin;
3707 if (c->at > modified_limit) {
3709 if (c->at < modified_limit)
3710 c->at = modified_limit;
3712 prev_vol = pa_cvolume_avg(&c->hw_volume);
3714 pa_sink_volume_change_apply(s, NULL);
3717 /* Called from the main thread */
3718 /* Gets the list of formats supported by the sink. The members and idxset must
3719 * be freed by the caller. */
3720 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3725 if (s->get_formats) {
3726 /* Sink supports format query, all is good */
3727 ret = s->get_formats(s);
3729 /* Sink doesn't support format query, so assume it does PCM */
3730 pa_format_info *f = pa_format_info_new();
3731 f->encoding = PA_ENCODING_PCM;
3733 ret = pa_idxset_new(NULL, NULL);
3734 pa_idxset_put(ret, f, NULL);
3740 /* Called from the main thread */
3741 /* Allows an external source to set what formats a sink supports if the sink
3742 * permits this. The function makes a copy of the formats on success. */
3743 pa_bool_t pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3748 /* Sink supports setting formats -- let's give it a shot */
3749 return s->set_formats(s, formats);
3751 /* Sink doesn't support setting this -- bail out */
3755 /* Called from the main thread */
3756 /* Checks if the sink can accept this format */
3757 pa_bool_t pa_sink_check_format(pa_sink *s, pa_format_info *f)
3759 pa_idxset *formats = NULL;
3760 pa_bool_t ret = FALSE;
3765 formats = pa_sink_get_formats(s);
3768 pa_format_info *finfo_device;
3771 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3772 if (pa_format_info_is_compatible(finfo_device, f)) {
3778 pa_idxset_free(formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
3784 /* Called from the main thread */
3785 /* Calculates the intersection between formats supported by the sink and
3786 * in_formats, and returns these, in the order of the sink's formats. */
3787 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3788 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3789 pa_format_info *f_sink, *f_in;
3794 if (!in_formats || pa_idxset_isempty(in_formats))
3797 sink_formats = pa_sink_get_formats(s);
3799 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3800 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3801 if (pa_format_info_is_compatible(f_sink, f_in))
3802 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3808 pa_idxset_free(sink_formats, (pa_free2_cb_t) pa_format_info_free2, NULL);