This file is part of PulseAudio.

Copyright 2004-2006 Lennart Poettering
Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

PulseAudio is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation; either version 2.1 of the License,
or (at your option) any later version.

PulseAudio is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
29 #include <pulse/introspect.h>
30 #include <pulse/format.h>
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/sink-input.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-util.h>
42 #include <pulsecore/sample-util.h>
43 #include <pulsecore/mix.h>
44 #include <pulsecore/core-subscribe.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/play-memblockq.h>
48 #include <pulsecore/flist.h>
52 #define MAX_MIX_CHANNELS 32
53 #define MIX_BUFFER_LENGTH (pa_page_size())
54 #define ABSOLUTE_MIN_LATENCY (500)
55 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
56 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
58 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
/* One queued hardware volume write for sinks using deferred volume
 * (PA_SINK_DEFERRED_VOLUME); entries are kept in a linked list on
 * s->thread_info.volume_changes.
 * NOTE(review): the extraction dropped this struct's data fields and its
 * closing brace; only the list links are visible here — restore from
 * upstream before compiling. */
60 struct pa_sink_volume_change {
64 PA_LLIST_FIELDS(pa_sink_volume_change);
/* Argument bundle passed with the PA_SINK_MESSAGE_SET_STATE message from the
 * main thread to the IO thread (see sink_set_state()). */
67 struct set_state_data {
68 pa_sink_state_t state;
69 pa_suspend_cause_t suspend_cause;
72 static void sink_free(pa_object *s);
74 static void pa_sink_volume_change_push(pa_sink *s);
75 static void pa_sink_volume_change_flush(pa_sink *s);
76 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
78 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
82 data->proplist = pa_proplist_new();
83 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
88 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
92 data->name = pa_xstrdup(name);
95 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
98 if ((data->sample_spec_is_set = !!spec))
99 data->sample_spec = *spec;
102 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
105 if ((data->channel_map_is_set = !!map))
106 data->channel_map = *map;
109 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
112 data->alternate_sample_rate_is_set = true;
113 data->alternate_sample_rate = alternate_sample_rate;
116 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
119 if ((data->volume_is_set = !!volume))
120 data->volume = *volume;
123 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
126 data->muted_is_set = true;
130 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
133 pa_xfree(data->active_port);
134 data->active_port = pa_xstrdup(port);
137 void pa_sink_new_data_done(pa_sink_new_data *data) {
140 pa_proplist_free(data->proplist);
143 pa_hashmap_free(data->ports);
145 pa_xfree(data->name);
146 pa_xfree(data->active_port);
149 /* Called from main context */
150 static void reset_callbacks(pa_sink *s) {
153 s->set_state_in_main_thread = NULL;
154 s->set_state_in_io_thread = NULL;
155 s->get_volume = NULL;
156 s->set_volume = NULL;
157 s->write_volume = NULL;
160 s->request_rewind = NULL;
161 s->update_requested_latency = NULL;
163 s->get_formats = NULL;
164 s->set_formats = NULL;
165 s->reconfigure = NULL;
168 /* Called from main context */
/* Create a new sink from the fully filled-in 'data'. Registers the sink name,
 * fires the SINK_NEW and SINK_FIXATE hooks, validates sample spec, channel
 * map and volume, initializes all main-thread and IO-thread state, and
 * creates the companion ".monitor" source. Returns NULL on validation or
 * hook failure.
 * NOTE(review): this listing is an incomplete extraction — variable
 * declarations, several branches, returns and closing braces are missing;
 * compare with upstream before modifying. */
169 pa_sink* pa_sink_new(
171 pa_sink_new_data *data,
172 pa_sink_flags_t flags) {
176 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
177 pa_source_new_data source_data;
183 pa_assert(data->name);
184 pa_assert_ctl_context();
186 s = pa_msgobject_new(pa_sink);
188 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
189 pa_log_debug("Failed to register name %s.", data->name);
194 pa_sink_new_data_set_name(data, name);
196 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
198 pa_namereg_unregister(core, name);
202 /* FIXME, need to free s here on failure */
/* Validate all caller-supplied metadata before touching the sink object. */
204 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
205 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
207 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
209 if (!data->channel_map_is_set)
210 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
212 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
213 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
215 /* FIXME: There should probably be a general function for checking whether
216 * the sink volume is allowed to be set, like there is for sink inputs. */
217 pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* Default to a reset (NORM) volume when the caller did not supply one. */
219 if (!data->volume_is_set) {
220 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
221 data->save_volume = false;
224 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
225 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
227 if (!data->muted_is_set)
231 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
233 pa_device_init_description(data->proplist, data->card);
234 pa_device_init_icon(data->proplist, true);
235 pa_device_init_intended_roles(data->proplist);
/* Pick the best-priority port if none was requested explicitly. */
237 if (!data->active_port) {
238 pa_device_port *p = pa_device_port_find_best(data->ports);
240 pa_sink_new_data_set_port(data, p->name);
243 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
245 pa_namereg_unregister(core, name);
/* From here on the validated 'data' is copied into the sink object. */
249 s->parent.parent.free = sink_free;
250 s->parent.process_msg = pa_sink_process_msg;
253 s->state = PA_SINK_INIT;
256 s->suspend_cause = data->suspend_cause;
257 s->name = pa_xstrdup(name);
258 s->proplist = pa_proplist_copy(data->proplist);
259 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
260 s->module = data->module;
261 s->card = data->card;
263 s->priority = pa_device_init_priority(s->proplist);
265 s->sample_spec = data->sample_spec;
266 s->channel_map = data->channel_map;
267 s->default_sample_rate = s->sample_spec.rate;
269 if (data->alternate_sample_rate_is_set)
270 s->alternate_sample_rate = data->alternate_sample_rate;
272 s->alternate_sample_rate = s->core->alternate_sample_rate;
274 s->avoid_resampling = data->avoid_resampling;
276 s->inputs = pa_idxset_new(NULL, NULL);
278 s->input_to_master = NULL;
280 s->reference_volume = s->real_volume = data->volume;
281 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
282 s->base_volume = PA_VOLUME_NORM;
283 s->n_volume_steps = PA_VOLUME_NORM+1;
284 s->muted = data->muted;
285 s->refresh_volume = s->refresh_muted = false;
292 /* As a minor optimization we just steal the list instead of
294 s->ports = data->ports;
297 s->active_port = NULL;
298 s->save_port = false;
300 if (data->active_port)
301 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
302 s->save_port = data->save_port;
304 /* Hopefully the active port has already been assigned in the previous call
305 to pa_device_port_find_best, but better safe than sorry */
307 s->active_port = pa_device_port_find_best(s->ports);
310 s->port_latency_offset = s->active_port->latency_offset;
312 s->port_latency_offset = 0;
314 s->save_volume = data->save_volume;
315 s->save_muted = data->save_muted;
317 pa_silence_memchunk_get(
318 &core->silence_cache,
/* IO-thread-owned state; only read/written from the IO thread once running. */
324 s->thread_info.rtpoll = NULL;
325 s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
326 (pa_free_cb_t) pa_sink_input_unref);
327 s->thread_info.soft_volume = s->soft_volume;
328 s->thread_info.soft_muted = s->muted;
329 s->thread_info.state = s->state;
330 s->thread_info.rewind_nbytes = 0;
331 s->thread_info.rewind_requested = false;
332 s->thread_info.max_rewind = 0;
333 s->thread_info.max_request = 0;
334 s->thread_info.requested_latency_valid = false;
335 s->thread_info.requested_latency = 0;
336 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
337 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
338 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
340 PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
341 s->thread_info.volume_changes_tail = NULL;
342 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
343 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
344 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
345 s->thread_info.port_latency_offset = s->port_latency_offset;
347 /* FIXME: This should probably be moved to pa_sink_put() */
348 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
351 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
353 pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
354 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
357 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
358 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Create the monitor source that mirrors this sink's rendered output. */
362 pa_source_new_data_init(&source_data);
363 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
364 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
365 pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
366 source_data.name = pa_sprintf_malloc("%s.monitor", name);
367 source_data.driver = data->driver;
368 source_data.module = data->module;
369 source_data.card = data->card;
370 source_data.avoid_resampling = data->avoid_resampling;
372 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
373 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
374 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
376 s->monitor_source = pa_source_new(core, &source_data,
377 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
378 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
380 pa_source_new_data_done(&source_data);
382 if (!s->monitor_source) {
388 s->monitor_source->monitor_of = s;
/* Keep the monitor's latency configuration in lock-step with the sink's. */
390 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
391 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
392 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
397 /* Called from main context */
/* Transition the sink to 'state' with 'suspend_cause', notifying first the
 * implementor (main-thread callback), then the IO thread (SET_STATE message),
 * then hooks/subscribers and the attached sink inputs. Returns 0 on success,
 * negative on failure (only resume is allowed to fail).
 * NOTE(review): incomplete extraction — some declarations, early returns,
 * else branches and closing braces are missing from this listing. */
398 static int sink_set_state(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) {
401 bool suspend_cause_changed;
404 pa_sink_state_t old_state;
405 pa_suspend_cause_t old_suspend_cause;
408 pa_assert_ctl_context();
410 state_changed = state != s->state;
411 suspend_cause_changed = suspend_cause != s->suspend_cause;
/* Nothing to do if neither the state nor the cause bitmask changes. */
413 if (!state_changed && !suspend_cause_changed)
416 suspending = PA_SINK_IS_OPENED(s->state) && state == PA_SINK_SUSPENDED;
417 resuming = s->state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state);
419 /* If we are resuming, suspend_cause must be 0. */
420 pa_assert(!resuming || !suspend_cause);
422 /* Here's something to think about: what to do with the suspend cause if
423 * resuming the sink fails? The old suspend cause will be incorrect, so we
424 * can't use that. On the other hand, if we set no suspend cause (as is the
425 * case currently), then it looks strange to have a sink suspended without
426 * any cause. It might be a good idea to add a new "resume failed" suspend
427 * cause, or it might just add unnecessary complexity, given that the
428 * current approach of not setting any suspend cause works well enough. */
/* First give the implementor's main-thread callback a chance to veto. */
430 if (s->set_state_in_main_thread) {
431 if ((ret = s->set_state_in_main_thread(s, state, suspend_cause)) < 0) {
432 /* set_state_in_main_thread() is allowed to fail only when resuming. */
435 /* If resuming fails, we set the state to SUSPENDED and
436 * suspend_cause to 0. */
437 state = PA_SINK_SUSPENDED;
439 state_changed = false;
440 suspend_cause_changed = suspend_cause != s->suspend_cause;
443 /* We know the state isn't changing. If the suspend cause isn't
444 * changing either, then there's nothing more to do. */
445 if (!suspend_cause_changed)
/* Then synchronously inform the IO thread of the new state/cause pair. */
451 struct set_state_data data = { .state = state, .suspend_cause = suspend_cause };
453 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, &data, 0, NULL)) < 0) {
454 /* SET_STATE is allowed to fail only when resuming. */
/* Roll the main-thread callback back to SUSPENDED on IO-thread failure. */
457 if (s->set_state_in_main_thread)
458 s->set_state_in_main_thread(s, PA_SINK_SUSPENDED, 0);
460 /* If resuming fails, we set the state to SUSPENDED and
461 * suspend_cause to 0. */
462 state = PA_SINK_SUSPENDED;
464 state_changed = false;
465 suspend_cause_changed = suspend_cause != s->suspend_cause;
468 /* We know the state isn't changing. If the suspend cause isn't
469 * changing either, then there's nothing more to do. */
470 if (!suspend_cause_changed)
475 old_suspend_cause = s->suspend_cause;
476 if (suspend_cause_changed) {
477 char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
478 char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
480 pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(s->suspend_cause, old_cause_buf),
481 pa_suspend_cause_to_string(suspend_cause, new_cause_buf));
482 s->suspend_cause = suspend_cause;
485 old_state = s->state;
487 pa_log_debug("%s: state: %s -> %s", s->name, pa_sink_state_to_string(s->state), pa_sink_state_to_string(state));
490 /* If we enter UNLINKED state, then we don't send change notifications.
491 * pa_sink_unlink() will send unlink notifications instead. */
492 if (state != PA_SINK_UNLINKED) {
493 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
494 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
498 if (suspending || resuming || suspend_cause_changed) {
502 /* We're suspending or resuming, tell everyone about it */
504 PA_IDXSET_FOREACH(i, s->inputs, idx)
505 if (s->state == PA_SINK_SUSPENDED &&
506 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
507 pa_sink_input_kill(i);
509 i->suspend(i, old_state, old_suspend_cause);
/* Keep the monitor source's suspend state in sync with the sink. */
512 if ((suspending || resuming || suspend_cause_changed) && s->monitor_source && state != PA_SINK_UNLINKED)
513 pa_source_sync_suspend(s->monitor_source);
518 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Install the implementor callback that writes volume to hardware and toggle
 * PA_SINK_HW_VOLUME_CTRL to match. May be called between _new() and _put(),
 * or later at runtime.
 * NOTE(review): the `s->set_volume = cb;` assignment and the if/else lines
 * around the flag updates are missing from this listing. */
524 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
525 pa_sink_flags_t flags;
528 pa_assert(!s->write_volume || cb);
532 /* Save the current flags so we can tell if they've changed */
536 /* The sink implementor is responsible for setting decibel volume support */
537 s->flags |= PA_SINK_HW_VOLUME_CTRL;
539 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
540 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
541 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
544 /* If the flags have changed after init, let any clients know via a change event */
545 if (s->state != PA_SINK_INIT && flags != s->flags)
546 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install the deferred-volume write callback (hw volume applied from the IO
 * thread) and toggle PA_SINK_DEFERRED_VOLUME to match. Requires set_volume
 * to already be set when cb is non-NULL.
 * NOTE(review): the if/else lines around the flag updates are missing from
 * this listing. */
549 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
550 pa_sink_flags_t flags;
553 pa_assert(!cb || s->set_volume);
555 s->write_volume = cb;
557 /* Save the current flags so we can tell if they've changed */
561 s->flags |= PA_SINK_DEFERRED_VOLUME;
563 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
565 /* If the flags have changed after init, let any clients know via a change event */
566 if (s->state != PA_SINK_INIT && flags != s->flags)
567 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
570 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
/* Install the implementor callback that writes mute to hardware and toggle
 * PA_SINK_HW_MUTE_CTRL to match.
 * NOTE(review): the `s->set_mute = cb;` assignment and the if/else lines are
 * missing from this listing. */
576 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
577 pa_sink_flags_t flags;
583 /* Save the current flags so we can tell if they've changed */
587 s->flags |= PA_SINK_HW_MUTE_CTRL;
589 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
591 /* If the flags have changed after init, let any clients know via a change event */
592 if (s->state != PA_SINK_INIT && flags != s->flags)
593 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Toggle PA_SINK_FLAT_VOLUME on this sink, honoring the daemon-wide
 * flat_volumes preference (the flag is never enabled when the user disabled
 * flat volumes globally).
 * NOTE(review): the if/else lines around the flag updates are missing from
 * this listing. */
596 static void enable_flat_volume(pa_sink *s, bool enable) {
597 pa_sink_flags_t flags;
601 /* Always follow the overall user preference here */
602 enable = enable && s->core->flat_volumes;
604 /* Save the current flags so we can tell if they've changed */
608 s->flags |= PA_SINK_FLAT_VOLUME;
610 s->flags &= ~PA_SINK_FLAT_VOLUME;
612 /* If the flags have changed after init, let any clients know via a change event */
613 if (s->state != PA_SINK_INIT && flags != s->flags)
614 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Toggle PA_SINK_DECIBEL_VOLUME; flat volume follows decibel support, so
 * enabling/disabling one also enables/disables the other.
 * NOTE(review): the if/else lines around the flag updates are missing from
 * this listing. */
617 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
618 pa_sink_flags_t flags;
622 /* Save the current flags so we can tell if they've changed */
626 s->flags |= PA_SINK_DECIBEL_VOLUME;
627 enable_flat_volume(s, true);
629 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
630 enable_flat_volume(s, false);
633 /* If the flags have changed after init, let any clients know via a change event */
634 if (s->state != PA_SINK_INIT && flags != s->flags)
635 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
638 /* Called from main context */
/* Finish sink construction: verify the implementor completed initialization,
 * settle the volume-related flags, move the sink from INIT to IDLE (or
 * SUSPENDED if a suspend cause is set), put the monitor source, and announce
 * the new sink to hooks/subscribers.
 * NOTE(review): incomplete extraction — some lines (comment continuations,
 * else branches, closing braces) are missing from this listing. */
639 void pa_sink_put(pa_sink* s) {
640 pa_sink_assert_ref(s);
641 pa_assert_ctl_context();
643 pa_assert(s->state == PA_SINK_INIT);
644 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
646 /* The following fields must be initialized properly when calling _put() */
647 pa_assert(s->asyncmsgq);
648 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
650 /* Generally, flags should be initialized via pa_sink_new(). As a
651 * special exception we allow some volume related flags to be set
652 * between _new() and _put() by the callback setter functions above.
654 * Thus we implement a couple safeguards here which ensure the above
655 * setters were used (or at least the implementor made manual changes
656 * in a compatible way).
658 * Note: All of these flags set here can change over the life time
660 pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
661 pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
662 pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
664 /* XXX: Currently decibel volume is disabled for all sinks that use volume
665 * sharing. When the master sink supports decibel volume, it would be good
666 * to have the flag also in the filter sink, but currently we don't do that
667 * so that the flags of the filter sink never change when it's moved from
668 * a master sink to another. One solution for this problem would be to
669 * remove user-visible volume altogether from filter sinks when volume
670 * sharing is used, but the current approach was easier to implement... */
671 /* We always support decibel volumes in software, otherwise we leave it to
672 * the sink implementor to set this flag as needed.
674 * Note: This flag can also change over the life time of the sink. */
675 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
676 pa_sink_enable_decibel_volume(s, true);
677 s->soft_volume = s->reference_volume;
680 /* If the sink implementor support DB volumes by itself, we should always
681 * try and enable flat volumes too */
682 if ((s->flags & PA_SINK_DECIBEL_VOLUME))
683 enable_flat_volume(s, true);
/* Filter sinks sharing volume inherit volume state from the master sink. */
685 if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
686 pa_sink *root_sink = pa_sink_get_master(s);
688 pa_assert(root_sink);
690 s->reference_volume = root_sink->reference_volume;
691 pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
693 s->real_volume = root_sink->real_volume;
694 pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
696 /* We assume that if the sink implementor changed the default
697 * volume he did so in real_volume, because that is the usual
698 * place where he is supposed to place his changes. */
699 s->reference_volume = s->real_volume;
701 s->thread_info.soft_volume = s->soft_volume;
702 s->thread_info.soft_muted = s->muted;
703 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
/* Consistency checks: flags, latency config and monitor must all agree. */
705 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
706 || (s->base_volume == PA_VOLUME_NORM
707 && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
708 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
709 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
710 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
711 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
713 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
714 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
715 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
717 if (s->suspend_cause)
718 pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED, s->suspend_cause) == 0);
720 pa_assert_se(sink_set_state(s, PA_SINK_IDLE, 0) == 0);
722 pa_source_put(s->monitor_source);
724 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
725 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
727 /* It's good to fire the SINK_PUT hook before updating the default sink,
728 * because module-switch-on-connect will set the new sink as the default
729 * sink, and if we were to call pa_core_update_default_sink() before that,
730 * the default sink might change twice, causing unnecessary stream moving. */
732 pa_core_update_default_sink(s->core);
734 pa_core_move_streams_to_newly_available_preferred_sink(s->core, s);
737 /* Called from main context */
/* Disconnect the sink from the core: fire the UNLINK hook, unregister the
 * name, remove it from the core/card idxsets, migrate or kill attached
 * inputs, enter the UNLINKED state and unlink the monitor source. Safe to
 * call more than once (guarded by unlink_requested).
 * NOTE(review): incomplete extraction — early returns, `if (linked)` guards
 * and closing braces are missing from this listing. */
738 void pa_sink_unlink(pa_sink* s) {
740 pa_sink_input *i, PA_UNUSED *j = NULL;
742 pa_sink_assert_ref(s);
743 pa_assert_ctl_context();
745 /* Please note that pa_sink_unlink() does more than simply
746 * reversing pa_sink_put(). It also undoes the registrations
747 * already done in pa_sink_new()! */
749 if (s->unlink_requested)
752 s->unlink_requested = true;
754 linked = PA_SINK_IS_LINKED(s->state);
757 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
759 if (s->state != PA_SINK_UNLINKED)
760 pa_namereg_unregister(s->core, s->name);
761 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
763 pa_core_update_default_sink(s->core);
766 pa_sink_move_streams_to_default_sink(s->core, s, false);
769 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill any inputs that could not be moved away. */
771 while ((i = pa_idxset_first(s->inputs, NULL))) {
773 pa_sink_input_kill(i);
778 /* It's important to keep the suspend cause unchanged when unlinking,
779 * because if we remove the SESSION suspend cause here, the alsa sink
780 * will sync its volume with the hardware while another user is
781 * active, messing up the volume for that other user. */
782 sink_set_state(s, PA_SINK_UNLINKED, s->suspend_cause);
784 s->state = PA_SINK_UNLINKED;
788 if (s->monitor_source)
789 pa_source_unlink(s->monitor_source);
792 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
793 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
797 /* Called from main context */
/* Destructor invoked via parent.parent.free when the last reference is
 * dropped; the sink must already be unlinked. Releases all owned resources.
 * NOTE(review): incomplete extraction — several frees (e.g. name/driver
 * strings) and closing braces are missing from this listing. */
798 static void sink_free(pa_object *o) {
799 pa_sink *s = PA_SINK(o);
802 pa_assert_ctl_context();
803 pa_assert(pa_sink_refcnt(s) == 0);
804 pa_assert(!PA_SINK_IS_LINKED(s->state));
806 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
808 pa_sink_volume_change_flush(s);
810 if (s->monitor_source) {
811 pa_source_unref(s->monitor_source);
812 s->monitor_source = NULL;
815 pa_idxset_free(s->inputs, NULL);
816 pa_hashmap_free(s->thread_info.inputs);
818 if (s->silence.memblock)
819 pa_memblock_unref(s->silence.memblock);
825 pa_proplist_free(s->proplist);
828 pa_hashmap_free(s->ports);
833 /* Called from main context, and not while the IO thread is active, please */
834 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
835 pa_sink_assert_ref(s);
836 pa_assert_ctl_context();
840 if (s->monitor_source)
841 pa_source_set_asyncmsgq(s->monitor_source, q);
844 /* Called from main context, and not while the IO thread is active, please */
/* Change the (currently only latency-related) flags of a sink, notify
 * subscribers/hooks, mirror the change onto the monitor source, and recurse
 * into any filter sinks stacked on top of this one.
 * NOTE(review): incomplete extraction — the `uint32_t idx;` declaration, an
 * early return and closing braces are missing from this listing. */
845 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
846 pa_sink_flags_t old_flags;
847 pa_sink_input *input;
850 pa_sink_assert_ref(s);
851 pa_assert_ctl_context();
853 /* For now, allow only a minimal set of flags to be changed. */
854 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
856 old_flags = s->flags;
857 s->flags = (s->flags & ~mask) | (value & mask);
859 if (s->flags == old_flags)
862 if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
863 pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
865 if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
866 pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
867 s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
869 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
870 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
/* Mirror the latency flags onto the monitor source. */
872 if (s->monitor_source)
873 pa_source_update_flags(s->monitor_source,
874 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
875 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
876 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
877 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
/* Propagate to filter sinks whose origin is an input on this sink. */
879 PA_IDXSET_FOREACH(input, s->inputs, idx) {
880 if (input->origin_sink)
881 pa_sink_update_flags(input->origin_sink, mask, value);
885 /* Called from IO context, or before _put() from main context */
886 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
887 pa_sink_assert_ref(s);
888 pa_sink_assert_io_context(s);
890 s->thread_info.rtpoll = p;
892 if (s->monitor_source)
893 pa_source_set_rtpoll(s->monitor_source, p);
896 /* Called from main context */
897 int pa_sink_update_status(pa_sink*s) {
898 pa_sink_assert_ref(s);
899 pa_assert_ctl_context();
900 pa_assert(PA_SINK_IS_LINKED(s->state));
902 if (s->state == PA_SINK_SUSPENDED)
905 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
908 /* Called from main context */
909 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
910 pa_suspend_cause_t merged_cause;
912 pa_sink_assert_ref(s);
913 pa_assert_ctl_context();
914 pa_assert(PA_SINK_IS_LINKED(s->state));
915 pa_assert(cause != 0);
918 merged_cause = s->suspend_cause | cause;
920 merged_cause = s->suspend_cause & ~cause;
923 return sink_set_state(s, PA_SINK_SUSPENDED, merged_cause);
925 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
928 /* Called from main context */
929 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
930 pa_sink_input *i, *n;
933 pa_sink_assert_ref(s);
934 pa_assert_ctl_context();
935 pa_assert(PA_SINK_IS_LINKED(s->state));
940 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
941 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
943 pa_sink_input_ref(i);
945 if (pa_sink_input_start_move(i) >= 0)
948 pa_sink_input_unref(i);
954 /* Called from main context */
955 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
958 pa_sink_assert_ref(s);
959 pa_assert_ctl_context();
960 pa_assert(PA_SINK_IS_LINKED(s->state));
963 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
964 if (PA_SINK_INPUT_IS_LINKED(i->state)) {
965 if (pa_sink_input_finish_move(i, s, save) < 0)
966 pa_sink_input_fail_move(i);
969 pa_sink_input_unref(i);
972 pa_queue_free(q, NULL);
975 /* Called from main context */
976 void pa_sink_move_all_fail(pa_queue *q) {
979 pa_assert_ctl_context();
982 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
983 pa_sink_input_fail_move(i);
984 pa_sink_input_unref(i);
987 pa_queue_free(q, NULL);
990 /* Called from IO thread context */
/* Scan all inputs (recursing through filter sinks) for underruns and return
 * how many bytes of 'left_to_play' remain after the longest underrun found.
 * NOTE(review): incomplete extraction — declarations of 'i'/'result'/'state',
 * some branches and closing braces are missing from this listing. */
991 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
996 pa_sink_assert_ref(s);
997 pa_sink_assert_io_context(s);
999 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1000 size_t uf = i->thread_info.underrun_for_sink;
1002 /* Propagate down the filter tree */
1003 if (i->origin_sink) {
1004 size_t filter_result, left_to_play_origin;
1006 /* The recursive call works in the origin sink domain ... */
1007 left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
1009 /* .. and returns the time to sleep before waking up. We need the
1010 * underrun duration for comparisons, so we undo the subtraction on
1011 * the return value... */
1012 filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
1014 /* ... and convert it back to the master sink domain */
1015 filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
1017 /* Remember the longest underrun so far */
1018 if (filter_result > result)
1019 result = filter_result;
1023 /* No underrun here, move on */
1025 } else if (uf >= left_to_play) {
1026 /* The sink has possibly consumed all the data the sink input provided */
1027 pa_sink_input_process_underrun(i);
1028 } else if (uf > result) {
1029 /* Remember the longest underrun so far */
1035 pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
1036 (long) result, (long) left_to_play - result);
1037 return left_to_play - result;
1040 /* Called from IO thread context */
/* Execute a rewind of 'nbytes' in the IO thread: rewind any pending deferred
 * volume changes, then every attached input, then the monitor source. A
 * zero-byte rewind that nobody requested is short-circuited.
 * NOTE(review): incomplete extraction — the declarations of 'i'/'state', the
 * early return and closing braces are missing from this listing. */
1041 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1045 pa_sink_assert_ref(s);
1046 pa_sink_assert_io_context(s);
1047 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1049 /* If nobody requested this and this is actually no real rewind
1050 * then we can short cut this. Please note that this means that
1051 * not all rewind requests triggered upstream will always be
1052 * translated in actual requests! */
1053 if (!s->thread_info.rewind_requested && nbytes <= 0)
1056 s->thread_info.rewind_nbytes = 0;
1057 s->thread_info.rewind_requested = false;
1060 pa_log_debug("Processing rewind...");
1061 if (s->flags & PA_SINK_DEFERRED_VOLUME)
1062 pa_sink_volume_change_rewind(s, nbytes);
1065 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1066 pa_sink_input_assert_ref(i);
1067 pa_sink_input_process_rewind(i, nbytes);
1071 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1072 pa_source_process_rewind(s->monitor_source, nbytes);
1076 /* Called from IO thread context */
/* Collect up to 'maxinfo' chunks from the attached inputs into 'info' for
 * mixing; shrinks *length to the shortest chunk so all entries cover the
 * same span. Silent chunks are skipped (their blocks unreffed). Each kept
 * entry holds a reference to its input in info->userdata.
 * NOTE(review): incomplete extraction — declarations, the info++/maxinfo--
 * advance, the `return n;` and closing braces are missing from this
 * listing. */
1077 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1081 size_t mixlength = *length;
1083 pa_sink_assert_ref(s);
1084 pa_sink_assert_io_context(s);
1087 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1088 pa_sink_input_assert_ref(i);
1090 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
1092 if (mixlength == 0 || info->chunk.length < mixlength)
1093 mixlength = info->chunk.length;
1095 if (pa_memblock_is_silence(info->chunk.memblock)) {
1096 pa_memblock_unref(info->chunk.memblock);
1100 info->userdata = pa_sink_input_ref(i);
1102 pa_assert(info->chunk.memblock);
1103 pa_assert(info->chunk.length > 0);
1111 *length = mixlength;
1116 /* Called from IO thread context */
/* After a render pass: advance each input's read pointer by the amount
 * actually rendered (result->length), feed the rendered/per-input data to
 * the monitor source and any direct outputs, and release the references
 * that fill_mix_info() stored in the info[] array. */
1117 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1121 unsigned n_unreffed = 0;
1123 pa_sink_assert_ref(s);
1124 pa_sink_assert_io_context(s);
1126 pa_assert(result->memblock);
1127 pa_assert(result->length > 0);
1129 /* We optimize for the case where the order of the inputs has not changed */
1131 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1133 pa_mix_info* m = NULL;
1135 pa_sink_input_assert_ref(i);
1137 /* Let's try to find the matching entry in the pa_mix_info array */
1139 for (j = 0; j < n; j ++) {
1140 if (info[p].userdata == i) {
1150 /* Drop read data */
1151 pa_sink_input_drop(i, result->length);
1153 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
/* Direct outputs get this input's own (pre-mix) data, with the
 * input's volume applied, rather than the full mix. */
1155 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1156 void *ostate = NULL;
1157 pa_source_output *o;
1160 if (m && m->chunk.memblock) {
1162 pa_memblock_ref(c.memblock);
1163 pa_assert(result->length <= c.length);
1164 c.length = result->length;
/* Copy-on-write before scaling so the shared block isn't modified. */
1166 pa_memchunk_make_writable(&c, 0);
1167 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
/* No per-input chunk available (elided else branch) — presumably
 * falls back to silence/result data; confirm against full source. */
1170 pa_memblock_ref(c.memblock);
1171 pa_assert(result->length <= c.length);
1172 c.length = result->length;
1175 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1176 pa_source_output_assert_ref(o);
1177 pa_assert(o->direct_on_input == i);
1178 pa_source_post_direct(s->monitor_source, o, &c);
1181 pa_memblock_unref(c.memblock);
/* Release the references held by the matched info[] entry. */
1186 if (m->chunk.memblock) {
1187 pa_memblock_unref(m->chunk.memblock);
1188 pa_memchunk_reset(&m->chunk);
1191 pa_sink_input_unref(m->userdata);
1198 /* Now drop references to entries that are included in the
1199 * pa_mix_info array but don't exist anymore */
1201 if (n_unreffed < n) {
1202 for (; n > 0; info++, n--) {
1204 pa_sink_input_unref(info->userdata);
1205 if (info->chunk.memblock)
1206 pa_memblock_unref(info->chunk.memblock);
/* The monitor source gets the full rendered mix. */
1210 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1211 pa_source_post(s->monitor_source, result);
1214 /* Called from IO thread context */
/* Render up to 'length' bytes of mixed audio into *result. Fast paths:
 * suspended sink → silence; no inputs → cached silence; exactly one
 * input → pass its chunk through, applying soft volume/mute without a
 * mix; otherwise mix all inputs into a fresh memblock. The caller owns
 * the returned memblock reference. */
1215 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1216 pa_mix_info info[MAX_MIX_CHANNELS];
1218 size_t block_size_max;
1220 pa_sink_assert_ref(s);
1221 pa_sink_assert_io_context(s);
1222 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1223 pa_assert(pa_frame_aligned(length, &s->sample_spec));
/* Rendering while a rewind is pending would produce data that is about
 * to be thrown away. */
1226 pa_assert(!s->thread_info.rewind_requested);
1227 pa_assert(s->thread_info.rewind_nbytes == 0);
1229 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1230 result->memblock = pa_memblock_ref(s->silence.memblock);
1231 result->index = s->silence.index;
1232 result->length = PA_MIN(s->silence.length, length);
/* A length of 0 means "pick a sensible default" (elided check);
 * cap at the mempool's maximum block size, frame-aligned. */
1239 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1241 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1242 if (length > block_size_max)
1243 length = pa_frame_align(block_size_max, &s->sample_spec);
1245 pa_assert(length > 0);
1247 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: nothing to play, hand out the cached silence block. */
1251 *result = s->silence;
1252 pa_memblock_ref(result->memblock);
1254 if (result->length > length)
1255 result->length = length;
1257 } else if (n == 1) {
/* Single input: avoid mixing, reuse its chunk directly. */
1260 *result = info[0].chunk;
1261 pa_memblock_ref(result->memblock);
1263 if (result->length > length)
1264 result->length = length;
1266 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1268 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1269 pa_memblock_unref(result->memblock);
1270 pa_silence_memchunk_get(&s->core->silence_cache,
1275 } else if (!pa_cvolume_is_norm(&volume)) {
/* Non-unity volume: copy-on-write, then scale in place. */
1276 pa_memchunk_make_writable(result, 0);
1277 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* General case (elided else): mix all n inputs into a new block. */
1281 result->memblock = pa_memblock_new(s->core->mempool, length);
1283 ptr = pa_memblock_acquire(result->memblock);
1284 result->length = pa_mix(info, n,
1287 &s->thread_info.soft_volume,
1288 s->thread_info.soft_muted);
1289 pa_memblock_release(result->memblock);
/* Advance inputs and release the references taken by fill_mix_info(). */
1294 inputs_drop(s, info, n, result);
1299 /* Called from IO thread context */
/* Like pa_sink_render(), but renders into a caller-supplied memchunk
 * (target) instead of allocating one. May shorten target->length if less
 * data was available than requested. */
1300 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1301 pa_mix_info info[MAX_MIX_CHANNELS];
1303 size_t length, block_size_max;
1305 pa_sink_assert_ref(s);
1306 pa_sink_assert_io_context(s);
1307 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1309 pa_assert(target->memblock);
1310 pa_assert(target->length > 0);
1311 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1313 pa_assert(!s->thread_info.rewind_requested);
1314 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: fill the target with silence and bail out. */
1316 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1317 pa_silence_memchunk(target, &s->sample_spec);
1323 length = target->length;
1324 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1325 if (length > block_size_max)
1326 length = pa_frame_align(block_size_max, &s->sample_spec);
1328 pa_assert(length > 0);
1330 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: nothing to mix, output silence. */
1333 if (target->length > length)
1334 target->length = length;
1336 pa_silence_memchunk(target, &s->sample_spec);
1337 } else if (n == 1) {
/* Single input: copy its chunk into the target, scaling only when
 * the combined soft+input volume is not unity. */
1340 if (target->length > length)
1341 target->length = length;
1343 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1345 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1346 pa_silence_memchunk(target, &s->sample_spec);
1350 vchunk = info[0].chunk;
1351 pa_memblock_ref(vchunk.memblock);
1353 if (vchunk.length > length)
1354 vchunk.length = length;
1356 if (!pa_cvolume_is_norm(&volume)) {
/* Copy-on-write before scaling the shared block. */
1357 pa_memchunk_make_writable(&vchunk, 0);
1358 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1361 pa_memchunk_memcpy(target, &vchunk);
1362 pa_memblock_unref(vchunk.memblock);
/* General case (elided else): mix directly into the target block. */
1368 ptr = pa_memblock_acquire(target->memblock);
1370 target->length = pa_mix(info, n,
1371 (uint8_t*) ptr + target->index, length,
1373 &s->thread_info.soft_volume,
1374 s->thread_info.soft_muted);
1376 pa_memblock_release(target->memblock);
1379 inputs_drop(s, info, n, target);
1384 /* Called from IO thread context */
/* Like pa_sink_render_into(), but guarantees the target is filled
 * completely: calls pa_sink_render_into() repeatedly (loop elided in
 * this excerpt) until target->length bytes have been produced. */
1385 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1389 pa_sink_assert_ref(s);
1390 pa_sink_assert_io_context(s);
1391 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1393 pa_assert(target->memblock);
1394 pa_assert(target->length > 0);
1395 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1397 pa_assert(!s->thread_info.rewind_requested);
1398 pa_assert(s->thread_info.rewind_nbytes == 0);
1400 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1401 pa_silence_memchunk(target, &s->sample_spec);
/* Loop body (elided): render successive sub-chunks until full. */
1414 pa_sink_render_into(s, &chunk);
1423 /* Called from IO thread context */
/* Render exactly 'length' bytes into *result: first a normal render,
 * then — if that produced less than requested — extend the block and
 * fill the remainder with pa_sink_render_into_full(). */
1424 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1425 pa_sink_assert_ref(s);
1426 pa_sink_assert_io_context(s);
1427 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1428 pa_assert(length > 0);
1429 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1432 pa_assert(!s->thread_info.rewind_requested);
1433 pa_assert(s->thread_info.rewind_nbytes == 0);
1437 pa_sink_render(s, length, result);
1439 if (result->length < length) {
/* Grow the chunk to the full requested size; make_writable with a
 * minimum size reallocates/copies as needed. */
1442 pa_memchunk_make_writable(result, length);
1444 chunk.memblock = result->memblock;
1445 chunk.index = result->index + result->length;
1446 chunk.length = length - result->length;
1448 pa_sink_render_into_full(s, &chunk);
1450 result->length = length;
1456 /* Called from main thread */
/* Try to switch the sink to a new sample spec (rate/format), e.g. to
 * match a passthrough stream or to avoid resampling. Bails out (elided
 * early returns) when reconfiguration is impossible: no reconfigure()
 * callback, sink or monitor currently RUNNING, invalid spec, or nothing
 * would change. Suspends the sink around the actual reconfigure call. */
1457 void pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
1458 pa_sample_spec desired_spec;
1459 uint32_t default_rate = s->default_sample_rate;
1460 uint32_t alternate_rate = s->alternate_sample_rate;
1463 bool default_rate_is_usable = false;
1464 bool alternate_rate_is_usable = false;
1465 bool avoid_resampling = s->avoid_resampling;
1467 if (pa_sample_spec_equal(spec, &s->sample_spec))
1470 if (!s->reconfigure)
1473 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1474 pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
1478 if (PA_SINK_IS_RUNNING(s->state)) {
1479 pa_log_info("Cannot update sample spec, SINK_IS_RUNNING, will keep using %s and %u Hz",
1480 pa_sample_format_to_string(s->sample_spec.format), s->sample_spec.rate);
1484 if (s->monitor_source) {
1485 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1486 pa_log_info("Cannot update sample spec, monitor source is RUNNING");
1491 if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
1494 desired_spec = s->sample_spec;
/* Passthrough (elided condition): must use the stream's exact spec. */
1497 /* We have to try to use the sink input format and rate */
1498 desired_spec.format = spec->format;
1499 desired_spec.rate = spec->rate;
1501 } else if (avoid_resampling) {
1502 /* We just try to set the sink input's sample rate if it's not too low */
1503 if (spec->rate >= default_rate || spec->rate >= alternate_rate)
1504 desired_spec.rate = spec->rate;
1505 desired_spec.format = spec->format;
1507 } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
1508 /* We can directly try to use this rate */
1509 desired_spec.rate = spec->rate;
1513 if (desired_spec.rate != spec->rate) {
1514 /* See if we can pick a rate that results in less resampling effort */
/* 11025 and 4000 pick out the two common rate families (44.1 kHz
 * multiples vs. 48 kHz multiples); same-family resampling is cheaper. */
1515 if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
1516 default_rate_is_usable = true;
1517 if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
1518 default_rate_is_usable = true;
1519 if (alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
1520 alternate_rate_is_usable = true;
1521 if (alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
1522 alternate_rate_is_usable = true;
1524 if (alternate_rate_is_usable && !default_rate_is_usable)
1525 desired_spec.rate = alternate_rate;
1527 desired_spec.rate = default_rate;
1530 if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_sink_is_passthrough(s))
/* NOTE(review): elided return here — non-passthrough with active
 * users presumably also aborts; confirm against full source. */
1533 if (!passthrough && pa_sink_used_by(s) > 0)
1536 pa_log_debug("Suspending sink %s due to changing format, desired format = %s rate = %u",
1537 s->name, pa_sample_format_to_string(desired_spec.format), desired_spec.rate);
1538 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1540 s->reconfigure(s, &desired_spec, passthrough);
1542 /* update monitor source as well */
1543 if (s->monitor_source && !passthrough)
1544 pa_source_reconfigure(s->monitor_source, &s->sample_spec, false);
1545 pa_log_info("Reconfigured successfully");
/* Corked inputs did not get a resampler update from the suspend cycle,
 * so refresh them explicitly. */
1547 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1548 if (i->state == PA_SINK_INPUT_CORKED)
1549 pa_sink_input_update_resampler(i);
1552 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1555 /* Called from main thread */
/* Query the sink's current latency (in the sound card's time domain) via
 * a synchronous message to the IO thread, then apply the port latency
 * offset. Returns 0 (elided returns) when suspended or when the sink
 * does not report latency. */
1556 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1559 pa_sink_assert_ref(s);
1560 pa_assert_ctl_context();
1561 pa_assert(PA_SINK_IS_LINKED(s->state));
1563 /* The returned value is supposed to be in the time domain of the sound card! */
1565 if (s->state == PA_SINK_SUSPENDED)
1568 if (!(s->flags & PA_SINK_LATENCY))
1571 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1573 /* the return value is unsigned, so check that the offset can be added to usec without
/* A negative offset larger than usec would underflow the unsigned
 * return value, so only add it when the sum stays non-negative. */
1575 if (-s->port_latency_offset <= usec)
1576 usec += s->port_latency_offset;
1580 return (pa_usec_t)usec;
1583 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): calls process_msg()
 * directly instead of going through the asyncmsgq. When allow_negative
 * is false, negative results (possible after applying the port latency
 * offset) are clamped (clamp statement elided in this excerpt). */
1584 int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
1588 pa_sink_assert_ref(s);
1589 pa_sink_assert_io_context(s);
1590 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1592 /* The returned value is supposed to be in the time domain of the sound card! */
1594 if (s->thread_info.state == PA_SINK_SUSPENDED)
1597 if (!(s->flags & PA_SINK_LATENCY))
1600 o = PA_MSGOBJECT(s);
1602 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1604 o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1606 /* If allow_negative is false, the call should only return positive values, */
1607 usec += s->thread_info.port_latency_offset;
1608 if (!allow_negative && usec < 0)
1614 /* Called from the main thread (and also from the IO thread while the main
1615 * thread is waiting).
1617 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1618 * set. Instead, flat volume mode is detected by checking whether the root sink
1619 * has the flag set. */
1620 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1621 pa_sink_assert_ref(s);
/* Resolve the root of a volume-sharing chain; flat volume is a property
 * of the root sink only. (NULL-check branch elided in this excerpt.) */
1623 s = pa_sink_get_master(s);
1626 return (s->flags & PA_SINK_FLAT_VOLUME);
1631 /* Called from the main thread (and also from the IO thread while the main
1632 * thread is waiting). */
/* Walk up the volume-sharing chain (filter sink -> master sink) and
 * return the root sink. May return NULL (elided return) when a filter
 * sink is not currently connected to a master. */
1633 pa_sink *pa_sink_get_master(pa_sink *s) {
1634 pa_sink_assert_ref(s);
1636 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1637 if (PA_UNLIKELY(!s->input_to_master))
1640 s = s->input_to_master->sink;
1646 /* Called from main context */
1647 bool pa_sink_is_filter(pa_sink *s) {
1648 pa_sink_assert_ref(s);
1650 return (s->input_to_master != NULL);
1653 /* Called from main context */
/* Returns true when the sink currently carries a passthrough stream.
 * Since passthrough is exclusive, it can only be the case when exactly
 * one input is connected. (false returns elided in this excerpt.) */
1654 bool pa_sink_is_passthrough(pa_sink *s) {
1655 pa_sink_input *alt_i;
1658 pa_sink_assert_ref(s);
1660 /* one and only one PASSTHROUGH input can possibly be connected */
1661 if (pa_idxset_size(s->inputs) == 1) {
1662 alt_i = pa_idxset_first(s->inputs, &idx);
1664 if (pa_sink_input_is_passthrough(alt_i))
1671 /* Called from main context */
/* Put the PA core objects into passthrough mode: suspend the monitor
 * (monitoring compressed data is meaningless), save the current volume,
 * and force the volume to (at most) 0 dB so the hardware passes the
 * bitstream through unmodified. */
1672 void pa_sink_enter_passthrough(pa_sink *s) {
1675 /* The sink implementation is reconfigured for passthrough in
1676 * pa_sink_reconfigure(). This function sets the PA core objects to
1677 * passthrough mode. */
1679 /* disable the monitor in passthrough mode */
1680 if (s->monitor_source) {
1681 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1682 pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1685 /* set the volume to NORM */
/* Save the user's volume so pa_sink_leave_passthrough() can restore it. */
1686 s->saved_volume = *pa_sink_get_volume(s, true);
1687 s->saved_save_volume = s->save_volume;
1689 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1690 pa_sink_set_volume(s, &volume, true, false);
1692 pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
1695 /* Called from main context */
/* Undo pa_sink_enter_passthrough(): resume the monitor source and
 * restore the volume that was saved when entering passthrough mode. */
1696 void pa_sink_leave_passthrough(pa_sink *s) {
1697 /* Unsuspend monitor */
1698 if (s->monitor_source) {
1699 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1700 pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1703 /* Restore sink volume to what it was before we entered passthrough mode */
1704 pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
/* Clear the saved state so stale values can't be restored twice. */
1706 pa_cvolume_init(&s->saved_volume);
1707 s->saved_save_volume = false;
1711 /* Called from main context. */
/* Recompute one input's reference ratio (its volume relative to the
 * sink's reference volume) after either side changed. Channels where the
 * sink volume is muted, or where the current ratio still reproduces the
 * input volume exactly, are left untouched to avoid rounding drift. */
1712 static void compute_reference_ratio(pa_sink_input *i) {
1714 pa_cvolume remapped;
1718 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1721 * Calculates the reference ratio from the sink's reference
1722 * volume. This basically calculates:
1724 * i->reference_ratio = i->volume / i->sink->reference_volume
/* Remap the sink volume into the input's channel map before dividing. */
1727 remapped = i->sink->reference_volume;
1728 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1730 ratio = i->reference_ratio;
1732 for (c = 0; c < i->sample_spec.channels; c++) {
1734 /* We don't update when the sink volume is 0 anyway */
1735 if (remapped.values[c] <= PA_VOLUME_MUTED)
1738 /* Don't update the reference ratio unless necessary */
1739 if (pa_sw_volume_multiply(
1741 remapped.values[c]) == i->volume.values[c])
1744 ratio.values[c] = pa_sw_volume_divide(
1745 i->volume.values[c],
1746 remapped.values[c]);
1749 pa_sink_input_set_reference_ratio(i, &ratio);
1752 /* Called from main context. Only called for the root sink in volume sharing
1753 * cases, except for internal recursive calls. */
/* Recompute reference ratios for every input of this sink, recursing
 * into linked filter sinks that share volume with this one. */
1754 static void compute_reference_ratios(pa_sink *s) {
1758 pa_sink_assert_ref(s);
1759 pa_assert_ctl_context();
1760 pa_assert(PA_SINK_IS_LINKED(s->state));
1761 pa_assert(pa_sink_flat_volume_enabled(s));
1763 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1764 compute_reference_ratio(i);
1766 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1767 && PA_SINK_IS_LINKED(i->origin_sink->state))
1768 compute_reference_ratios(i->origin_sink);
1772 /* Called from main context. Only called for the root sink in volume sharing
1773 * cases, except for internal recursive calls. */
/* Recompute each input's real ratio (volume relative to the sink's real
 * volume) and from it the soft volume actually applied in software. */
1774 static void compute_real_ratios(pa_sink *s) {
1778 pa_sink_assert_ref(s);
1779 pa_assert_ctl_context();
1780 pa_assert(PA_SINK_IS_LINKED(s->state));
1781 pa_assert(pa_sink_flat_volume_enabled(s));
1783 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1785 pa_cvolume remapped;
1787 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1788 /* The origin sink uses volume sharing, so this input's real ratio
1789 * is handled as a special case - the real ratio must be 0 dB, and
1790 * as a result i->soft_volume must equal i->volume_factor. */
1791 pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1792 i->soft_volume = i->volume_factor;
1794 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1795 compute_real_ratios(i->origin_sink);
1801 * This basically calculates:
1803 * i->real_ratio := i->volume / s->real_volume
1804 * i->soft_volume := i->real_ratio * i->volume_factor
/* Remap the sink's real volume into the input's channel map first. */
1807 remapped = s->real_volume;
1808 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1810 i->real_ratio.channels = i->sample_spec.channels;
1811 i->soft_volume.channels = i->sample_spec.channels;
1813 for (c = 0; c < i->sample_spec.channels; c++) {
/* Sink channel muted: the output is silent regardless of ratio, so
 * just mute the soft volume and keep the old ratio. */
1815 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1816 /* We leave i->real_ratio untouched */
1817 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1821 /* Don't lose accuracy unless necessary */
1822 if (pa_sw_volume_multiply(
1823 i->real_ratio.values[c],
1824 remapped.values[c]) != i->volume.values[c])
1826 i->real_ratio.values[c] = pa_sw_volume_divide(
1827 i->volume.values[c],
1828 remapped.values[c]);
1830 i->soft_volume.values[c] = pa_sw_volume_multiply(
1831 i->real_ratio.values[c],
1832 i->volume_factor.values[c]);
1835 /* We don't copy the soft_volume to the thread_info data
1836 * here. That must be done by the caller */
/* Remap *v from channel map 'from' to 'to' while minimizing the impact
 * on other streams. Returns v. */
1840 static pa_cvolume *cvolume_remap_minimal_impact(
1842 const pa_cvolume *template,
1843 const pa_channel_map *from,
1844 const pa_channel_map *to) {
1849 pa_assert(template);
1852 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1853 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1855 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1856 * mapping from sink input to sink volumes:
1858 * If template is a possible remapping from v it is used instead
1859 * of remapping anew.
1861 * If the channel maps don't match we set an all-channel volume on
1862 * the sink to ensure that changing a volume on one stream has no
1863 * effect that cannot be compensated for in another stream that
1864 * does not have the same channel map as the sink. */
1866 if (pa_channel_map_equal(from, to))
/* Round-trip check: if remapping the template back to 'from' yields
 * exactly *v, the template is already a valid remapping — reuse it. */
1870 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
/* Fallback: flatten to a uniform volume (the channel maximum). */
1875 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1879 /* Called from main thread. Only called for the root sink in volume sharing
1880 * cases, except for internal recursive calls. */
/* Merge (take the per-channel maximum of) all input volumes into
 * *max_volume, recursing through volume-sharing filter sinks. Used to
 * derive the root sink's real volume in flat volume mode. */
1881 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1885 pa_sink_assert_ref(s);
1886 pa_assert(max_volume);
1887 pa_assert(channel_map);
1888 pa_assert(pa_sink_flat_volume_enabled(s));
1890 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1891 pa_cvolume remapped;
1893 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1894 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1895 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1897 /* Ignore this input. The origin sink uses volume sharing, so this
1898 * input's volume will be set to be equal to the root sink's real
1899 * volume. Obviously this input's current volume must not then
1900 * affect what the root sink's real volume will be. */
1904 remapped = i->volume;
1905 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1906 pa_cvolume_merge(max_volume, max_volume, &remapped);
1910 /* Called from main thread. Only called for the root sink in volume sharing
1911 * cases, except for internal recursive calls. */
/* True if the sink (or, transitively, any volume-sharing filter sink
 * connected to it) has at least one real input stream. */
1912 static bool has_inputs(pa_sink *s) {
1916 pa_sink_assert_ref(s);
1918 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1919 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1926 /* Called from main thread. Only called for the root sink in volume sharing
1927 * cases, except for internal recursive calls. */
/* Set s->real_volume to *new_volume (remapped into the sink's channel
 * map) and propagate it down the volume-sharing tree: inputs that feed
 * filter sinks follow the root's real volume directly. */
1928 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1932 pa_sink_assert_ref(s);
1933 pa_assert(new_volume);
1934 pa_assert(channel_map);
1936 s->real_volume = *new_volume;
1937 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1939 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1940 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1941 if (pa_sink_flat_volume_enabled(s)) {
1942 pa_cvolume new_input_volume;
1944 /* Follow the root sink's real volume. */
1945 new_input_volume = *new_volume;
1946 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
1947 pa_sink_input_set_volume_direct(i, &new_input_volume);
1948 compute_reference_ratio(i);
/* Recurse into the filter sink so its own inputs get updated too. */
1951 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1952 update_real_volume(i->origin_sink, new_volume, channel_map);
1957 /* Called from main thread. Only called for the root sink in shared volume
/* Derive the root sink's real volume in flat volume mode: it becomes the
 * per-channel maximum over all connected stream volumes, after which the
 * inputs' real ratios/soft volumes are recomputed against it. */
1959 static void compute_real_volume(pa_sink *s) {
1960 pa_sink_assert_ref(s);
1961 pa_assert_ctl_context();
1962 pa_assert(PA_SINK_IS_LINKED(s->state));
1963 pa_assert(pa_sink_flat_volume_enabled(s));
1964 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
1966 /* This determines the maximum volume of all streams and sets
1967 * s->real_volume accordingly. */
1969 if (!has_inputs(s)) {
1970 /* In the special case that we have no sink inputs we leave the
1971 * volume unmodified. */
1972 update_real_volume(s, &s->reference_volume, &s->channel_map);
/* Start from silence, then raise to the loudest stream per channel. */
1976 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1978 /* First let's determine the new maximum volume of all inputs
1979 * connected to this sink */
1980 get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
1981 update_real_volume(s, &s->real_volume, &s->channel_map);
1983 /* Then, let's update the real ratios/soft volumes of all inputs
1984 * connected to this sink */
1985 compute_real_ratios(s);
1988 /* Called from main thread. Only called for the root sink in shared volume
1989 * cases, except for internal recursive calls. */
/* After a sink-initiated reference volume change, push the new volume
 * back into each input: i->volume := s->reference_volume *
 * i->reference_ratio, recursing through volume-sharing filter sinks. */
1990 static void propagate_reference_volume(pa_sink *s) {
1994 pa_sink_assert_ref(s);
1995 pa_assert_ctl_context();
1996 pa_assert(PA_SINK_IS_LINKED(s->state));
1997 pa_assert(pa_sink_flat_volume_enabled(s));
1999 /* This is called whenever the sink volume changes that is not
2000 * caused by a sink input volume change. We need to fix up the
2001 * sink input volumes accordingly */
2003 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2004 pa_cvolume new_volume;
2006 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2007 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2008 propagate_reference_volume(i->origin_sink);
2010 /* Since the origin sink uses volume sharing, this input's volume
2011 * needs to be updated to match the root sink's real volume, but
2012 * that will be done later in update_real_volume(). */
2016 /* This basically calculates:
2018 * i->volume := s->reference_volume * i->reference_ratio */
2020 new_volume = s->reference_volume;
2021 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2022 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2023 pa_sink_input_set_volume_direct(i, &new_volume);
2027 /* Called from main thread. Only called for the root sink in volume sharing
2028 * cases, except for internal recursive calls. The return value indicates
2029 * whether any reference volume actually changed. */
/* Set the sink's reference volume to *v (remapped into the sink's
 * channel map), update the save flag, and recurse into volume-sharing
 * filter sinks so the whole sink tree stays consistent. */
2030 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
2032 bool reference_volume_changed;
2036 pa_sink_assert_ref(s);
2037 pa_assert(PA_SINK_IS_LINKED(s->state));
2039 pa_assert(channel_map);
2040 pa_assert(pa_cvolume_valid(v));
2043 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
2045 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
2046 pa_sink_set_reference_volume_direct(s, &volume);
/* Keep an existing save request alive even when the volume is unchanged. */
2048 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
2050 if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2051 /* If the root sink's volume doesn't change, then there can't be any
2052 * changes in the other sinks in the sink tree either.
2054 * It's probably theoretically possible that even if the root sink's
2055 * volume changes slightly, some filter sink doesn't change its volume
2056 * due to rounding errors. If that happens, we still want to propagate
2057 * the changed root sink volume to the sinks connected to the
2058 * intermediate sink that didn't change its volume. This theoretical
2059 * possibility is the reason why we have that !(s->flags &
2060 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2061 * notice even if we returned here false always if
2062 * reference_volume_changed is false. */
2065 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2066 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2067 && PA_SINK_IS_LINKED(i->origin_sink->state))
2068 update_reference_volume(i->origin_sink, v, channel_map, false);
2074 /* Called from main thread */
/* Public entry point for changing a sink's volume. With a non-NULL
 * volume, updates the reference volume of the root sink (resolving
 * volume sharing) and, in flat volume mode, propagates it to all
 * streams. With a NULL volume (flat volume only), re-synchronizes the
 * sink's reference/real volumes from the current stream volumes.
 * Finally applies the result via set_volume() or the soft volume, and
 * notifies the IO thread. */
2075 void pa_sink_set_volume(
2077 const pa_cvolume *volume,
2081 pa_cvolume new_reference_volume;
2084 pa_sink_assert_ref(s);
2085 pa_assert_ctl_context();
2086 pa_assert(PA_SINK_IS_LINKED(s->state));
2087 pa_assert(!volume || pa_cvolume_valid(volume));
2088 pa_assert(volume || pa_sink_flat_volume_enabled(s));
2089 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2091 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2092 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2093 if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2094 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
2098 /* In case of volume sharing, the volume is set for the root sink first,
2099 * from which it's then propagated to the sharing sinks. */
2100 root_sink = pa_sink_get_master(s);
2102 if (PA_UNLIKELY(!root_sink))
2105 /* As a special exception we accept mono volumes on all sinks --
2106 * even on those with more complex channel maps */
/* (elided condition: the branch below runs when volume != NULL) */
2109 if (pa_cvolume_compatible(volume, &s->sample_spec))
2110 new_reference_volume = *volume;
/* Mono volume on a multichannel sink: scale the existing balance. */
2112 new_reference_volume = s->reference_volume;
2113 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2116 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2118 if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2119 if (pa_sink_flat_volume_enabled(root_sink)) {
2120 /* OK, propagate this volume change back to the inputs */
2121 propagate_reference_volume(root_sink);
2123 /* And now recalculate the real volume */
2124 compute_real_volume(root_sink);
2126 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2130 /* If volume is NULL we synchronize the sink's real and
2131 * reference volumes with the stream volumes. */
2133 pa_assert(pa_sink_flat_volume_enabled(root_sink));
2135 /* Ok, let's determine the new real volume */
2136 compute_real_volume(root_sink);
2138 /* Let's 'push' the reference volume if necessary */
2139 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2140 /* If the sink and its root don't have the same number of channels, we need to remap */
2141 if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2142 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2143 update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2145 /* Now that the reference volume is updated, we can update the streams'
2146 * reference ratios. */
2147 compute_reference_ratios(root_sink);
2150 if (root_sink->set_volume) {
2151 /* If we have a function set_volume(), then we do not apply a
2152 * soft volume by default. However, set_volume() is free to
2153 * apply one to root_sink->soft_volume */
2155 pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
/* Deferred-volume sinks call set_volume() from the IO thread instead. */
2156 if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2157 root_sink->set_volume(root_sink);
2160 /* If we have no function set_volume(), then the soft volume
2161 * becomes the real volume */
2162 root_sink->soft_volume = root_sink->real_volume;
2164 /* This tells the sink that soft volume and/or real volume changed */
2166 pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2169 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2170 * Only to be called by sink implementor */
/* Set the sink's software volume directly (NULL resets to unity). For
 * non-deferred sinks, pushes the value to the IO thread via message;
 * deferred-volume sinks update thread_info in place (they already run
 * in IO context here). */
2171 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2173 pa_sink_assert_ref(s);
2174 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2176 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2177 pa_sink_assert_io_context(s);
2179 pa_assert_ctl_context();
/* (elided condition: reset when volume == NULL, else copy) */
2182 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2184 s->soft_volume = *volume;
2186 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2187 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2189 s->thread_info.soft_volume = s->soft_volume;
2192 /* Called from the main thread. Only called for the root sink in volume sharing
2193 * cases, except for internal recursive calls. */
/* React to a hardware-initiated volume change: adopt the new real volume
 * as the reference volume and rebuild all stream volumes from their
 * (unchanged) real ratios. */
2194 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2198 pa_sink_assert_ref(s);
2199 pa_assert(old_real_volume);
2200 pa_assert_ctl_context();
2201 pa_assert(PA_SINK_IS_LINKED(s->state));
2203 /* This is called when the hardware's real volume changes due to
2204 * some external event. We copy the real volume into our
2205 * reference volume and then rebuild the stream volumes based on
2206 * i->real_ratio which should stay fixed. */
2208 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* Nothing changed — avoid touching anything (early return elided). */
2209 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2212 /* 1. Make the real volume the reference volume */
2213 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2216 if (pa_sink_flat_volume_enabled(s)) {
2218 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2219 pa_cvolume new_volume;
2221 /* 2. Since the sink's reference and real volumes are equal
2222 * now our ratios should be too. */
2223 pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2225 /* 3. Recalculate the new stream reference volume based on the
2226 * reference ratio and the sink's reference volume.
2228 * This basically calculates:
2230 * i->volume = s->reference_volume * i->reference_ratio
2232 * This is identical to propagate_reference_volume() */
2233 new_volume = s->reference_volume;
2234 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2235 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2236 pa_sink_input_set_volume_direct(i, &new_volume);
2238 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2239 && PA_SINK_IS_LINKED(i->origin_sink->state))
2240 propagate_real_volume(i->origin_sink, old_real_volume);
2244 /* Something got changed in the hardware. It probably makes sense
2245 * to save changed hw settings given that hw volume changes not
2246 * triggered by PA are almost certainly done by the user. */
2247 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2248 s->save_volume = true;
2251 /* Called from io thread */
2252 void pa_sink_update_volume_and_mute(pa_sink *s) {
2254 pa_sink_assert_io_context(s);
2256 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2259 /* Called from main thread */
2260 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
2261 pa_sink_assert_ref(s);
2262 pa_assert_ctl_context();
2263 pa_assert(PA_SINK_IS_LINKED(s->state));
2265 if (s->refresh_volume || force_refresh) {
2266 struct pa_cvolume old_real_volume;
2268 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2270 old_real_volume = s->real_volume;
2272 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2275 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2277 update_real_volume(s, &s->real_volume, &s->channel_map);
2278 propagate_real_volume(s, &old_real_volume);
2281 return &s->reference_volume;
2284 /* Called from main thread. In volume sharing cases, only the root sink may
2286 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2287 pa_cvolume old_real_volume;
2289 pa_sink_assert_ref(s);
2290 pa_assert_ctl_context();
2291 pa_assert(PA_SINK_IS_LINKED(s->state));
2292 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2294 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2296 old_real_volume = s->real_volume;
2297 update_real_volume(s, new_real_volume, &s->channel_map);
2298 propagate_real_volume(s, &old_real_volume);
2301 /* Called from main thread */
2302 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2305 pa_sink_assert_ref(s);
2306 pa_assert_ctl_context();
2308 old_muted = s->muted;
2310 if (mute == old_muted) {
2311 s->save_muted |= save;
2316 s->save_muted = save;
2318 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
2319 s->set_mute_in_progress = true;
2321 s->set_mute_in_progress = false;
2324 if (!PA_SINK_IS_LINKED(s->state))
2327 pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
2328 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2329 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2330 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2333 /* Called from main thread */
2334 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2336 pa_sink_assert_ref(s);
2337 pa_assert_ctl_context();
2338 pa_assert(PA_SINK_IS_LINKED(s->state));
2340 if ((s->refresh_muted || force_refresh) && s->get_mute) {
2343 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2344 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2345 pa_sink_mute_changed(s, mute);
2347 if (s->get_mute(s, &mute) >= 0)
2348 pa_sink_mute_changed(s, mute);
2355 /* Called from main thread */
2356 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2357 pa_sink_assert_ref(s);
2358 pa_assert_ctl_context();
2359 pa_assert(PA_SINK_IS_LINKED(s->state));
2361 if (s->set_mute_in_progress)
2364 /* pa_sink_set_mute() does this same check, so this may appear redundant,
2365 * but we must have this here also, because the save parameter of
2366 * pa_sink_set_mute() would otherwise have unintended side effects (saving
2367 * the mute state when it shouldn't be saved). */
2368 if (new_muted == s->muted)
2371 pa_sink_set_mute(s, new_muted, true);
2374 /* Called from main thread */
2375 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2376 pa_sink_assert_ref(s);
2377 pa_assert_ctl_context();
2380 pa_proplist_update(s->proplist, mode, p);
2382 if (PA_SINK_IS_LINKED(s->state)) {
2383 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2384 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2390 /* Called from main thread */
2391 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
2392 void pa_sink_set_description(pa_sink *s, const char *description) {
2394 pa_sink_assert_ref(s);
2395 pa_assert_ctl_context();
2397 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2400 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2402 if (old && description && pa_streq(old, description))
2406 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2408 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2410 if (s->monitor_source) {
2413 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2414 pa_source_set_description(s->monitor_source, n);
2418 if (PA_SINK_IS_LINKED(s->state)) {
2419 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2420 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2424 /* Called from main thread */
2425 unsigned pa_sink_linked_by(pa_sink *s) {
2428 pa_sink_assert_ref(s);
2429 pa_assert_ctl_context();
2430 pa_assert(PA_SINK_IS_LINKED(s->state));
2432 ret = pa_idxset_size(s->inputs);
2434 /* We add in the number of streams connected to us here. Please
2435 * note the asymmetry to pa_sink_used_by()! */
2437 if (s->monitor_source)
2438 ret += pa_source_linked_by(s->monitor_source);
2443 /* Called from main thread */
2444 unsigned pa_sink_used_by(pa_sink *s) {
2447 pa_sink_assert_ref(s);
2448 pa_assert_ctl_context();
2449 pa_assert(PA_SINK_IS_LINKED(s->state));
2451 ret = pa_idxset_size(s->inputs);
2452 pa_assert(ret >= s->n_corked);
2454 /* Streams connected to our monitor source do not matter for
2455 * pa_sink_used_by()!.*/
2457 return ret - s->n_corked;
2460 /* Called from main thread */
2461 unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
2466 pa_sink_assert_ref(s);
2467 pa_assert_ctl_context();
2469 if (!PA_SINK_IS_LINKED(s->state))
2474 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2475 if (i == ignore_input)
2478 /* We do not assert here. It is perfectly valid for a sink input to
2479 * be in the INIT state (i.e. created, marked done but not yet put)
2480 * and we should not care if it's unlinked as it won't contribute
2481 * towards our busy status.
2483 if (!PA_SINK_INPUT_IS_LINKED(i->state))
2486 if (i->state == PA_SINK_INPUT_CORKED)
2489 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2495 if (s->monitor_source)
2496 ret += pa_source_check_suspend(s->monitor_source, ignore_output);
2501 const char *pa_sink_state_to_string(pa_sink_state_t state) {
2503 case PA_SINK_INIT: return "INIT";
2504 case PA_SINK_IDLE: return "IDLE";
2505 case PA_SINK_RUNNING: return "RUNNING";
2506 case PA_SINK_SUSPENDED: return "SUSPENDED";
2507 case PA_SINK_UNLINKED: return "UNLINKED";
2508 case PA_SINK_INVALID_STATE: return "INVALID_STATE";
2511 pa_assert_not_reached();
2514 /* Called from the IO thread */
2515 static void sync_input_volumes_within_thread(pa_sink *s) {
2519 pa_sink_assert_ref(s);
2520 pa_sink_assert_io_context(s);
2522 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2523 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2526 i->thread_info.soft_volume = i->soft_volume;
2527 pa_sink_input_request_rewind(i, 0, true, false, false);
2531 /* Called from the IO thread. Only called for the root sink in volume sharing
2532 * cases, except for internal recursive calls. */
2533 static void set_shared_volume_within_thread(pa_sink *s) {
2534 pa_sink_input *i = NULL;
2537 pa_sink_assert_ref(s);
2539 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2541 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2542 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2543 set_shared_volume_within_thread(i->origin_sink);
2547 /* Called from IO thread, except when it is not */
/* NOTE(review): this extract appears to have lost several code-only lines
 * (break/return statements, local declarations, closing braces) — compare
 * against the upstream version of this file before relying on control flow
 * here. Comments below document the intent of each message. */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        /* A new sink input is being attached to this sink's IO thread. */
        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;

            pa_sink_input_attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be fixed up and
             * then configured on the sink. If this causes the sink latency to
             * go down, the sink implementor is responsible for doing a rewind
             * in the update_requested_latency() callback to ensure that the
             * sink buffer doesn't contain more data than what the new latency
             * allows.
             *
             * XXX: Does it really make sense to push this responsibility to
             * the sink implementors? Wouldn't it be better to do it once in
             * the core than many times in the modules? */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming.
             *
             * XXX: Does it really make sense to push this functionality to
             * the sink implementors? Wouldn't it be better to do it once in
             * the core than many times in the modules? */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);

        /* A sink input is being detached from this sink's IO thread. */
        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            pa_sink_input_detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;

            pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
            pa_sink_invalidate_requested_latency(s, true);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);

        /* First half of a stream move: take the input away from this sink,
         * rewinding back as much of its already-rendered audio as we can. */
        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                size_t sink_nbytes, total_nbytes;

                /* The old sink probably has some audio from this
                 * stream in its buffer. We want to "take it back" as
                 * much as possible and play it to the new sink. We
                 * don't know at this point how much the old sink can
                 * rewind. We have to pick something, and that
                 * something is the full latency of the old sink here.
                 * So we rewind the stream buffer by the sink latency
                 * amount, which may be more than what we should
                 * rewind. This can result in a chunk of audio being
                 * played both to the old sink and the new sink.
                 *
                 * FIXME: Fix this code so that we don't have to make
                 * guesses about how much the sink will actually be
                 * able to rewind. If someone comes up with a solution
                 * for this, something to note is that the part of the
                 * latency that the old sink couldn't rewind should
                 * ideally be compensated after the stream has moved
                 * to the new sink by adding silence. The new sink
                 * most likely can't start playing the moved stream
                 * immediately, and that gap should be removed from
                 * the "compensation silence" (at least at the time of
                 * writing this, the move finish code will actually
                 * already take care of dropping the new sink's
                 * unrewindable latency, so taking into account the
                 * unrewindable latency of the old sink is the only
                 * problem).
                 *
                 * The render_memblockq contents are discarded,
                 * because when the sink changes, the format of the
                 * audio stored in the render_memblockq may change
                 * too, making the stored audio invalid. FIXME:
                 * However, the read and write indices are moved back
                 * the same amount, so if they are not the same now,
                 * they won't be the same after the rewind either. If
                 * the write index of the render_memblockq is ahead of
                 * the read index, then the render_memblockq will feed
                 * the new sink some silence first, which it shouldn't
                 * do. The write index should be flushed to be the
                 * same as the read index. */

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s, false);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = true;
                    pa_sink_input_process_rewind(i, sink_nbytes);

            pa_sink_input_detach(i);

            /* Let's remove the sink input ...*/
            pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));

            pa_sink_invalidate_requested_latency(s, true);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);

        /* Second half of a stream move: the input arrives at its new sink. */
        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_sink_input_attach(i);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {

                /* In the ideal case the new sink would start playing
                 * the stream immediately. That requires the sink to
                 * be able to rewind all of its latency, which usually
                 * isn't possible, so there will probably be some gap
                 * before the moved stream becomes audible. We then
                 * have two possibilities: 1) start playing the stream
                 * from where it is now, or 2) drop the unrewindable
                 * latency of the sink from the stream. With option 1
                 * we won't lose any audio but the stream will have a
                 * pause. With option 2 we may lose some audio but the
                 * stream time will be somewhat in sync with the wall
                 * clock. Lennart seems to have chosen option 2 (one
                 * of the reasons might have been that option 1 is
                 * actually much harder to implement), so we drop the
                 * latency of the new sink from the moved stream and
                 * hope that the sink will undo most of that in the
                 * rewind. */

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s, false);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);

            /* Updating the requested sink latency has to be done
             * after the sink rewind request, not before, because
             * otherwise the sink may limit the rewind amount
             * needlessly. */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);

        /* Apply the shared volume starting at the volume-sharing root. */
        case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
            pa_sink *root_sink = pa_sink_get_master(s);

            if (PA_LIKELY(root_sink))
                set_shared_volume_within_thread(root_sink);

        case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:

            if (s->flags & PA_SINK_DEFERRED_VOLUME) {
                /* Queue the hw volume write to happen at the right moment. */
                pa_sink_volume_change_push(s);
            /* Fall through ... */

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);

        case PA_SINK_MESSAGE_GET_MUTE:

        case PA_SINK_MESSAGE_GET_VOLUME:

            if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
                /* Drop pending deferred changes before reading back hw state. */
                pa_sink_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);

            /* In case sink implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);

            if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)

        case PA_SINK_MESSAGE_GET_MUTE:

            if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
                return s->get_mute(s, userdata);

        /* Sink state transition; also notifies inputs of suspend changes. */
        case PA_SINK_MESSAGE_SET_STATE: {
            struct set_state_data *data = userdata;
            bool suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(data->state)) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && data->state == PA_SINK_SUSPENDED);

            if (s->set_state_in_io_thread) {
                if ((r = s->set_state_in_io_thread(s, data->state, data->suspend_cause)) < 0)

            s->thread_info.state = data->state;

            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = false;

            if (suspend_change) {

                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);

        case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            /* Make sure we're not messing with main thread when no longer linked */
            if (!PA_SINK_IS_LINKED(s->state))

            pa_sink_get_volume(s, true);
            pa_sink_get_mute(s, true);

        case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
            s->thread_info.port_latency_offset = offset;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
2973 /* Called from main thread */
2974 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
2979 pa_core_assert_ref(c);
2980 pa_assert_ctl_context();
2981 pa_assert(cause != 0);
2983 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2986 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2993 /* Called from IO thread */
2994 void pa_sink_detach_within_thread(pa_sink *s) {
2998 pa_sink_assert_ref(s);
2999 pa_sink_assert_io_context(s);
3000 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3002 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3003 pa_sink_input_detach(i);
3005 if (s->monitor_source)
3006 pa_source_detach_within_thread(s->monitor_source);
3009 /* Called from IO thread */
3010 void pa_sink_attach_within_thread(pa_sink *s) {
3014 pa_sink_assert_ref(s);
3015 pa_sink_assert_io_context(s);
3016 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3018 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3019 pa_sink_input_attach(i);
3021 if (s->monitor_source)
3022 pa_source_attach_within_thread(s->monitor_source);
3025 /* Called from IO thread */
3026 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
3027 pa_sink_assert_ref(s);
3028 pa_sink_assert_io_context(s);
3029 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3031 if (nbytes == (size_t) -1)
3032 nbytes = s->thread_info.max_rewind;
3034 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
3036 if (s->thread_info.rewind_requested &&
3037 nbytes <= s->thread_info.rewind_nbytes)
3040 s->thread_info.rewind_nbytes = nbytes;
3041 s->thread_info.rewind_requested = true;
3043 if (s->request_rewind)
3044 s->request_rewind(s);
3047 /* Called from IO thread */
3048 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
3049 pa_usec_t result = (pa_usec_t) -1;
3052 pa_usec_t monitor_latency;
3054 pa_sink_assert_ref(s);
3055 pa_sink_assert_io_context(s);
3057 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3058 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
3060 if (s->thread_info.requested_latency_valid)
3061 return s->thread_info.requested_latency;
3063 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3064 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3065 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3066 result = i->thread_info.requested_sink_latency;
3068 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3070 if (monitor_latency != (pa_usec_t) -1 &&
3071 (result == (pa_usec_t) -1 || result > monitor_latency))
3072 result = monitor_latency;
3074 if (result != (pa_usec_t) -1)
3075 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3077 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3078 /* Only cache if properly initialized */
3079 s->thread_info.requested_latency = result;
3080 s->thread_info.requested_latency_valid = true;
3086 /* Called from main thread */
3087 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3090 pa_sink_assert_ref(s);
3091 pa_assert_ctl_context();
3092 pa_assert(PA_SINK_IS_LINKED(s->state));
3094 if (s->state == PA_SINK_SUSPENDED)
3097 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3102 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3103 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3107 pa_sink_assert_ref(s);
3108 pa_sink_assert_io_context(s);
3110 if (max_rewind == s->thread_info.max_rewind)
3113 s->thread_info.max_rewind = max_rewind;
3115 if (PA_SINK_IS_LINKED(s->thread_info.state))
3116 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3117 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3119 if (s->monitor_source)
3120 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3123 /* Called from main thread */
3124 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3125 pa_sink_assert_ref(s);
3126 pa_assert_ctl_context();
3128 if (PA_SINK_IS_LINKED(s->state))
3129 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3131 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3134 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3135 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3138 pa_sink_assert_ref(s);
3139 pa_sink_assert_io_context(s);
3141 if (max_request == s->thread_info.max_request)
3144 s->thread_info.max_request = max_request;
3146 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3149 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3150 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3154 /* Called from main thread */
3155 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3156 pa_sink_assert_ref(s);
3157 pa_assert_ctl_context();
3159 if (PA_SINK_IS_LINKED(s->state))
3160 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3162 pa_sink_set_max_request_within_thread(s, max_request);
3165 /* Called from IO thread */
3166 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
3170 pa_sink_assert_ref(s);
3171 pa_sink_assert_io_context(s);
3173 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3174 s->thread_info.requested_latency_valid = false;
3178 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3180 if (s->update_requested_latency)
3181 s->update_requested_latency(s);
3183 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3184 if (i->update_sink_requested_latency)
3185 i->update_sink_requested_latency(i);
3189 /* Called from main thread */
3190 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3191 pa_sink_assert_ref(s);
3192 pa_assert_ctl_context();
3194 /* min_latency == 0: no limit
3195 * min_latency anything else: specified limit
3197 * Similar for max_latency */
3199 if (min_latency < ABSOLUTE_MIN_LATENCY)
3200 min_latency = ABSOLUTE_MIN_LATENCY;
3202 if (max_latency <= 0 ||
3203 max_latency > ABSOLUTE_MAX_LATENCY)
3204 max_latency = ABSOLUTE_MAX_LATENCY;
3206 pa_assert(min_latency <= max_latency);
3208 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3209 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3210 max_latency == ABSOLUTE_MAX_LATENCY) ||
3211 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3213 if (PA_SINK_IS_LINKED(s->state)) {
3219 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3221 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3224 /* Called from main thread */
3225 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3226 pa_sink_assert_ref(s);
3227 pa_assert_ctl_context();
3228 pa_assert(min_latency);
3229 pa_assert(max_latency);
3231 if (PA_SINK_IS_LINKED(s->state)) {
3232 pa_usec_t r[2] = { 0, 0 };
3234 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3236 *min_latency = r[0];
3237 *max_latency = r[1];
3239 *min_latency = s->thread_info.min_latency;
3240 *max_latency = s->thread_info.max_latency;
3244 /* Called from IO thread */
3245 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3246 pa_sink_assert_ref(s);
3247 pa_sink_assert_io_context(s);
3249 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3250 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3251 pa_assert(min_latency <= max_latency);
3253 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3254 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3255 max_latency == ABSOLUTE_MAX_LATENCY) ||
3256 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3258 if (s->thread_info.min_latency == min_latency &&
3259 s->thread_info.max_latency == max_latency)
3262 s->thread_info.min_latency = min_latency;
3263 s->thread_info.max_latency = max_latency;
3265 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3269 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3270 if (i->update_sink_latency_range)
3271 i->update_sink_latency_range(i);
3274 pa_sink_invalidate_requested_latency(s, false);
3276 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3279 /* Called from main thread */
3280 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3281 pa_sink_assert_ref(s);
3282 pa_assert_ctl_context();
3284 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3285 pa_assert(latency == 0);
3289 if (latency < ABSOLUTE_MIN_LATENCY)
3290 latency = ABSOLUTE_MIN_LATENCY;
3292 if (latency > ABSOLUTE_MAX_LATENCY)
3293 latency = ABSOLUTE_MAX_LATENCY;
3295 if (PA_SINK_IS_LINKED(s->state))
3296 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3298 s->thread_info.fixed_latency = latency;
3300 pa_source_set_fixed_latency(s->monitor_source, latency);
3303 /* Called from main thread */
3304 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3307 pa_sink_assert_ref(s);
3308 pa_assert_ctl_context();
3310 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3313 if (PA_SINK_IS_LINKED(s->state))
3314 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3316 latency = s->thread_info.fixed_latency;
/* Called from IO thread */
/* IO-thread side of the fixed-latency update: stores the new value,
 * notifies all attached sink inputs, invalidates the cached requested
 * latency and propagates the change to the monitor source. */
void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        /* Dynamic-latency sinks: only 0 is valid, and the monitor
         * source is reset to 0 as well. */
        pa_assert(latency == 0);
        s->thread_info.fixed_latency = 0;

        if (s->monitor_source)
            pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);

    pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(latency <= ABSOLUTE_MAX_LATENCY);

    /* No-op if the value did not actually change. */
    if (s->thread_info.fixed_latency == latency)

    s->thread_info.fixed_latency = latency;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        /* Let every attached input react to the new fixed latency. */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_fixed_latency)
                i->update_sink_fixed_latency(i);

    pa_sink_invalidate_requested_latency(s, false);

    pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
/* Called from main context */
/* Stores the per-port latency offset and forwards it to the IO thread
 * (via message when linked, directly otherwise), then fires the
 * PORT_LATENCY_OFFSET_CHANGED hook. */
void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
    pa_sink_assert_ref(s);

    s->port_latency_offset = offset;

    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
        /* Not linked: write the IO-thread copy directly. */
        s->thread_info.port_latency_offset = offset;

    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
/* Called from main context */
/* Returns the sink's maximum rewind size in bytes. For a linked sink
 * the value is fetched from the IO thread via a synchronous message;
 * an unlinked sink's thread_info copy can be read directly. */
size_t pa_sink_get_max_rewind(pa_sink *s) {
    pa_assert_ctl_context();
    pa_sink_assert_ref(s);

    if (!PA_SINK_IS_LINKED(s->state))
        return s->thread_info.max_rewind;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
/* Called from main context */
/* Returns the sink's maximum request size in bytes, mirroring
 * pa_sink_get_max_rewind(): direct read when unlinked, synchronous
 * message round-trip to the IO thread when linked. */
size_t pa_sink_get_max_request(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (!PA_SINK_IS_LINKED(s->state))
        return s->thread_info.max_request;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3400 /* Called from main context */
3401 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3402 pa_device_port *port;
3404 pa_sink_assert_ref(s);
3405 pa_assert_ctl_context();
3408 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3409 return -PA_ERR_NOTIMPLEMENTED;
3413 return -PA_ERR_NOENTITY;
3415 if (!(port = pa_hashmap_get(s->ports, name)))
3416 return -PA_ERR_NOENTITY;
3418 if (s->active_port == port) {
3419 s->save_port = s->save_port || save;
3423 if (s->set_port(s, port) < 0)
3424 return -PA_ERR_NOENTITY;
3426 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3428 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3430 s->active_port = port;
3431 s->save_port = save;
3433 pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3435 /* The active port affects the default sink selection. */
3436 pa_core_update_default_sink(s->core);
3438 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
/* Derives PA_PROP_DEVICE_ICON_NAME from form factor, device class,
 * profile name and bus, unless an icon name is already set. 'is_sink'
 * selects the fallback icon when nothing else matches. The final name
 * is assembled as "<type><profile-suffix>[-<bus>]". */
bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
    const char *ff, *c, *t = NULL, *s = "", *profile, *bus;

    /* Respect an icon name that was set explicitly by someone else. */
    if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))

    /* First choice: map the form factor to an icon name. */
    if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {

        if (pa_streq(ff, "microphone"))
            t = "audio-input-microphone";
        else if (pa_streq(ff, "webcam"))
        else if (pa_streq(ff, "computer"))
        else if (pa_streq(ff, "handset"))
        else if (pa_streq(ff, "portable"))
            t = "multimedia-player";
        else if (pa_streq(ff, "tv"))
            t = "video-display";

         * The following icons are not part of the icon naming spec;
         * a proposal to add them was rejected upstream, see:
         * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
        else if (pa_streq(ff, "headset"))
            t = "audio-headset";
        else if (pa_streq(ff, "headphone"))
            t = "audio-headphones";
        else if (pa_streq(ff, "speaker"))
            t = "audio-speakers";
        else if (pa_streq(ff, "hands-free"))
            t = "audio-handsfree";

    /* Second choice: fall back to the device class. */
    if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
        if (pa_streq(c, "modem"))

            /* Generic fallback for sources when nothing matched. */
            t = "audio-input-microphone";

    /* Append a suffix derived from the profile name, if any. */
    if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
        if (strstr(profile, "analog"))
        else if (strstr(profile, "iec958"))
        else if (strstr(profile, "hdmi"))

    bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);

    pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
/* Derives PA_PROP_DEVICE_DESCRIPTION for a device, unless already set.
 * Candidates are tried in order: the owning card's description, a
 * localized name based on form factor or device class, and finally the
 * raw product name. If a profile description exists it is appended. */
bool pa_device_init_description(pa_proplist *p, pa_card *card) {
    const char *s, *d = NULL, *k;

    /* Respect a description that was set explicitly by someone else. */
    if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))

        /* Prefer inheriting the card's description. */
        if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))

        if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
            if (pa_streq(s, "internal"))
                d = _("Built-in Audio");

        if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
            if (pa_streq(s, "modem"))

        /* Last resort: the raw product name from the hardware. */
        d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);

    k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);

        /* Combine the device and profile descriptions when both exist. */
        pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
        pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
/* Sets PA_PROP_DEVICE_INTENDED_ROLES to "phone" for telephony-style
 * form factors (handset, hands-free, headset), unless the property is
 * already present. */
bool pa_device_init_intended_roles(pa_proplist *p) {

    /* Respect roles that were set explicitly by someone else. */
    if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))

    if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
        if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
            || pa_streq(s, "headset")) {
            pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
/* Computes a heuristic priority for a device from its proplist. Each
 * property contributes an additive weight: device class dominates,
 * then form factor, then bus, then profile name. Higher is preferred. */
unsigned pa_device_init_priority(pa_proplist *p) {
    unsigned priority = 0;

    /* Device class: "sound" is strongly preferred, modems get nothing. */
    if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {

        if (pa_streq(s, "sound"))
        else if (!pa_streq(s, "modem"))

    /* Form factor: rank common playback form factors. */
    if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {

        if (pa_streq(s, "headphone"))
        else if (pa_streq(s, "hifi"))
        else if (pa_streq(s, "speaker"))
        else if (pa_streq(s, "portable"))

    /* Bus: external/hotpluggable buses rank above internal PCI. */
    if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {

        if (pa_streq(s, "bluetooth"))
        else if (pa_streq(s, "usb"))
        else if (pa_streq(s, "pci"))

    /* Profile name: small tie-breaker between analog and iec958. */
    if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {

        if (pa_startswith(s, "analog-"))
        else if (pa_startswith(s, "iec958-"))
3611 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
/* Called from the IO thread. */
/* Allocates a volume-change event, recycling one from the static free
 * list when possible, and initializes its volume to the sink's channel
 * count. */
static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
    pa_sink_volume_change *c;
    if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
        c = pa_xnew(pa_sink_volume_change, 1);

    PA_LLIST_INIT(pa_sink_volume_change, c);
    pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
/* Called from the IO thread. */
/* Returns a volume-change event to the static free list, falling back
 * to freeing it when the list is full. */
static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
    if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
/* Called from the IO thread. */
/* Queues a deferred hardware volume change so that it takes effect when
 * the audio currently in flight has been played: the event is stamped
 * with now + sink latency (+ extra delay), inserted into the
 * time-ordered change list, and nudged by a safety margin — later for
 * volume increases, earlier for decreases — to avoid audible glitches. */
void pa_sink_volume_change_push(pa_sink *s) {
    pa_sink_volume_change *c = NULL;
    pa_sink_volume_change *nc = NULL;
    pa_sink_volume_change *pc = NULL;
    uint32_t safety_margin = s->thread_info.volume_change_safety_margin;

    const char *direction = NULL;

    nc = pa_sink_volume_change_new(s);

    /* NOTE: There is already more different volumes in pa_sink that I can remember.
     * Adding one more volume for HW would get us rid of this, but I am trying
     * to survive with the ones we already have. */
    /* The target HW volume is the real volume with the SW part divided out. */
    pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);

    /* Nothing queued and the HW volume is already there: drop the event. */
    if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
        pa_log_debug("Volume not changing");
        pa_sink_volume_change_free(nc);

    /* Schedule the change for when currently buffered audio is audible. */
    nc->at = pa_sink_get_latency_within_thread(s, false);
    nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    /* Walk backwards from the tail to find the insertion point. */
    if (s->thread_info.volume_changes_tail) {
        for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
            /* If volume is going up let's do it a bit late. If it is going
             * down let's do it a bit early. */
            if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
                if (nc->at + safety_margin > c->at) {
                    nc->at += safety_margin;
            else if (nc->at - safety_margin > c->at) {
                nc->at -= safety_margin;

        /* No suitable predecessor: compare against the current HW volume
         * and prepend at the head of the list. */
        if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
            nc->at += safety_margin;
            nc->at -= safety_margin;

        PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);

        PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);

    pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);

    /* We can ignore volume events that came earlier but should happen later than this. */
    PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
        pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
        pa_sink_volume_change_free(c);
    /* The new event is now the last one in the queue. */
    s->thread_info.volume_changes_tail = nc;
/* Called from the IO thread. */
/* Discards all pending volume-change events without applying them and
 * resets the queue to empty. */
static void pa_sink_volume_change_flush(pa_sink *s) {
    pa_sink_volume_change *c = s->thread_info.volume_changes;

    s->thread_info.volume_changes = NULL;
    s->thread_info.volume_changes_tail = NULL;

        /* Save the next pointer before freeing the current node. */
        pa_sink_volume_change *next = c->next;
        pa_sink_volume_change_free(c);
/* Called from the IO thread. */
/* Applies every queued volume change whose due time has been reached,
 * updating the recorded current HW volume. If 'usec_to_next' is
 * non-NULL it receives the time until the next pending change (0 when
 * none remain). */
bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {

    /* Nothing to do for an empty queue or an unlinked sink. */
    if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {

    /* Deferred volume requires a driver write_volume() callback. */
    pa_assert(s->write_volume);

    now = pa_rtclock_now();

    /* Pop and apply every event that is already due. */
    while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
        pa_sink_volume_change *c = s->thread_info.volume_changes;
        PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
        pa_log_debug("Volume change to %d at %llu was written %llu usec late",
                     pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));

        s->thread_info.current_hw_volume = c->hw_volume;
        pa_sink_volume_change_free(c);

    if (s->thread_info.volume_changes) {
            /* Report how long until the next event is due. */
            *usec_to_next = s->thread_info.volume_changes->at - now;
        if (pa_log_ratelimit(PA_LOG_DEBUG))
            pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));

        /* Queue drained: clear the stale tail pointer. */
        s->thread_info.volume_changes_tail = NULL;
/* Called from the IO thread. */
/* Compensates the volume-change queue for a rewind of 'nbytes': events
 * scheduled beyond the current latency horizon are pulled earlier by
 * the rewound duration (but never before the horizon itself, adjusted
 * by the safety margin), then any now-due events are applied. */
static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
    /* All the queued volume events later than current latency are shifted to happen earlier. */
    pa_sink_volume_change *c;
    pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
    pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
    pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);

    pa_log_debug("latency = %lld", (long long) limit);
    limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
        pa_usec_t modified_limit = limit;
        /* Mirror the push-time safety margin: decreases may happen a bit
         * early, increases a bit late. */
        if (prev_vol > pa_cvolume_avg(&c->hw_volume))
            modified_limit -= s->thread_info.volume_change_safety_margin;
            modified_limit += s->thread_info.volume_change_safety_margin;
        if (c->at > modified_limit) {
            /* Clamp so the event never moves before its limit. */
            if (c->at < modified_limit)
                c->at = modified_limit;
        prev_vol = pa_cvolume_avg(&c->hw_volume);
    /* Apply anything the shift made due right now. */
    pa_sink_volume_change_apply(s, NULL);
/* Called from the main thread */
/* Gets the list of formats supported by the sink. The members and idxset must
 * be freed by the caller. */
pa_idxset* pa_sink_get_formats(pa_sink *s) {
    if (s->get_formats) {
        /* Sink supports format query, all is good */
        ret = s->get_formats(s);
        /* Sink doesn't support format query, so assume it does PCM */
        pa_format_info *f = pa_format_info_new();
        f->encoding = PA_ENCODING_PCM;

        ret = pa_idxset_new(NULL, NULL);
        pa_idxset_put(ret, f, NULL);
/* Called from the main thread */
/* Allows an external source to set what formats a sink supports if the sink
 * permits this. The function makes a copy of the formats on success. */
bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
        /* Sink supports setting formats -- let's give it a shot */
        return s->set_formats(s, formats);
        /* Sink doesn't support setting this -- bail out */
/* Called from the main thread */
/* Checks if the sink can accept this format */
/* Returns true when any of the sink's supported formats is compatible
 * with 'f'. The temporary format list is freed before returning. */
bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
    pa_idxset *formats = NULL;

    formats = pa_sink_get_formats(s);

        pa_format_info *finfo_device;

        /* Linear scan: stop at the first compatible format. */
        PA_IDXSET_FOREACH(finfo_device, formats, i) {
            if (pa_format_info_is_compatible(finfo_device, f)) {

        pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
/* Called from the main thread */
/* Calculates the intersection between formats supported by the sink and
 * in_formats, and returns these, in the order of the sink's formats.
 * The returned idxset contains copies; the caller owns and frees it. */
pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
    pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
    pa_format_info *f_sink, *f_in;

    /* An empty/missing input set intersects to the empty set. */
    if (!in_formats || pa_idxset_isempty(in_formats))

    sink_formats = pa_sink_get_formats(s);

    /* Outer loop over sink formats preserves the sink's ordering. */
    PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
        PA_IDXSET_FOREACH(f_in, in_formats, j) {
            if (pa_format_info_is_compatible(f_sink, f_in))
                pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);

        pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
/* Called from the main thread */
/* Updates the sink's sample format and posts a change event. No-op if
 * the format is unchanged. */
void pa_sink_set_sample_format(pa_sink *s, pa_sample_format_t format) {
    pa_sample_format_t old_format;

    pa_assert(pa_sample_format_valid(format));

    old_format = s->sample_spec.format;
    if (old_format == format)

    pa_log_info("%s: format: %s -> %s",
                s->name, pa_sample_format_to_string(old_format), pa_sample_format_to_string(format));

    s->sample_spec.format = format;

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Called from the main thread */
/* Updates the sink's sample rate and posts a change event. No-op if
 * the rate is unchanged. */
void pa_sink_set_sample_rate(pa_sink *s, uint32_t rate) {
    pa_assert(pa_sample_rate_valid(rate));

    old_rate = s->sample_spec.rate;
    if (old_rate == rate)

    pa_log_info("%s: rate: %u -> %u", s->name, old_rate, rate);

    s->sample_spec.rate = rate;

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Called from the main thread. */
/* Directly overwrites the sink's reference volume, logging the old and
 * new values and firing the subscription event plus the
 * SINK_VOLUME_CHANGED hook. No-op when the volume is unchanged. */
void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
    pa_cvolume old_volume;
    char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
    char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];

    old_volume = s->reference_volume;

    if (pa_cvolume_equal(volume, &old_volume))

    s->reference_volume = *volume;
    pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
                 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
                                            s->flags & PA_SINK_DECIBEL_VOLUME),
                 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
                                            s->flags & PA_SINK_DECIBEL_VOLUME));

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);
3943 void pa_sink_move_streams_to_default_sink(pa_core *core, pa_sink *old_sink, bool default_sink_changed) {
3946 bool old_sink_is_unavailable = false;
3949 pa_assert(old_sink);
3951 if (core->state == PA_CORE_SHUTDOWN)
3954 if (core->default_sink == NULL || core->default_sink->unlink_requested)
3957 if (old_sink == core->default_sink)
3960 if (old_sink->active_port && old_sink->active_port->available == PA_AVAILABLE_NO)
3961 old_sink_is_unavailable = true;
3963 PA_IDXSET_FOREACH(i, old_sink->inputs, idx) {
3964 if (!PA_SINK_INPUT_IS_LINKED(i->state))
3970 if (pa_safe_streq(old_sink->name, i->preferred_sink) && !old_sink_is_unavailable)
3973 if (!pa_sink_input_may_move_to(i, core->default_sink))
3976 if (default_sink_changed)
3977 pa_log_info("The sink input %u \"%s\" is moving to %s due to change of the default sink.",
3978 i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
3980 pa_log_info("The sink input %u \"%s\" is moving to %s due to unlink of a sink.",
3981 i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
3983 pa_sink_input_move_to(i, core->default_sink, false);