2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
29 #include <pulse/introspect.h>
30 #include <pulse/format.h>
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/sink-input.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-util.h>
42 #include <pulsecore/sample-util.h>
43 #include <pulsecore/mix.h>
44 #include <pulsecore/core-subscribe.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/play-memblockq.h>
48 #include <pulsecore/flist.h>
/* Tuning constants for sink mixing and latency handling.
 * NOTE(review): units of ABSOLUTE_MIN_LATENCY are not visible in this
 * extract; presumably microseconds, matching the PA_USEC_* constants
 * used for the max/default values — confirm against the full file. */
52 #define MAX_MIX_CHANNELS 32
/* Mix buffer size: one memory page. */
53 #define MIX_BUFFER_LENGTH (pa_page_size())
54 #define ABSOLUTE_MIN_LATENCY (500)
55 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
56 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* pa_sink is a public class derived from pa_msgobject. */
58 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
/* Linked-list node for deferred hardware volume changes
 * (remaining fields are elided in this extract). */
60 struct pa_sink_volume_change {
64 PA_LLIST_FIELDS(pa_sink_volume_change);
/* Payload passed with the SET_STATE message to the IO thread:
 * the target state plus the accompanying suspend cause. */
67 struct set_state_data {
68 pa_sink_state_t state;
69 pa_suspend_cause_t suspend_cause;
/* Forward declarations for helpers defined later in this file. */
72 static void sink_free(pa_object *s);
74 static void pa_sink_volume_change_push(pa_sink *s);
75 static void pa_sink_volume_change_flush(pa_sink *s);
76 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/* Initialize a pa_sink_new_data structure for use with pa_sink_new():
 * creates an empty property list and a ports hashmap keyed by port name
 * that unrefs each pa_device_port when an entry is removed.
 * NOTE(review): zeroing of *data and the return statement are not
 * visible in this extract. */
78 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
82 data->proplist = pa_proplist_new();
83 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
/* Set the sink name in the new-data struct; stores a heap copy of 'name'.
 * NOTE(review): the free of any previously set name is not visible in
 * this extract. */
88 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
92 data->name = pa_xstrdup(name);
/* Record the desired sample spec. A NULL 'spec' clears the is-set flag;
 * otherwise the spec is copied by value and the flag is set. */
95 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
98 if ((data->sample_spec_is_set = !!spec))
99 data->sample_spec = *spec;
/* Record the desired channel map. A NULL 'map' clears the is-set flag;
 * otherwise the map is copied by value and the flag is set. */
102 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
105 if ((data->channel_map_is_set = !!map))
106 data->channel_map = *map;
/* Record an explicit alternate sample rate and mark it as set. */
109 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
112 data->alternate_sample_rate_is_set = true;
113 data->alternate_sample_rate = alternate_sample_rate;
/* Record the avoid-resampling preference and mark it as set. */
116 void pa_sink_new_data_set_avoid_resampling(pa_sink_new_data *data, bool avoid_resampling) {
119 data->avoid_resampling_is_set = true;
120 data->avoid_resampling = avoid_resampling;
/* Record the initial volume. A NULL 'volume' clears the is-set flag;
 * otherwise the cvolume is copied by value and the flag is set. */
123 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
126 if ((data->volume_is_set = !!volume))
127 data->volume = *volume;
/* Record the initial mute state as explicitly set.
 * NOTE(review): the assignment of data->muted itself is not visible in
 * this extract — only the is-set flag is shown. */
130 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
133 data->muted_is_set = true;
/* Set the initially active port by name, replacing (and freeing) any
 * previously stored port name. */
137 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
140 pa_xfree(data->active_port);
141 data->active_port = pa_xstrdup(port);
/* Release all resources owned by a pa_sink_new_data: the proplist, the
 * ports hashmap (which unrefs its ports via its free callback), the name
 * string and the active-port name. Guard conditions around the hashmap
 * free are elided in this extract. */
144 void pa_sink_new_data_done(pa_sink_new_data *data) {
147 pa_proplist_free(data->proplist);
150 pa_hashmap_free(data->ports);
152 pa_xfree(data->name);
153 pa_xfree(data->active_port);
156 /* Called from main context */
/* Clear every implementor-supplied callback pointer on the sink so a
 * freshly created (or reset) sink starts with no driver hooks installed.
 * Callback setters below re-install these and adjust flags accordingly. */
157 static void reset_callbacks(pa_sink *s) {
160 s->set_state_in_main_thread = NULL;
161 s->set_state_in_io_thread = NULL;
162 s->get_volume = NULL;
163 s->set_volume = NULL;
164 s->write_volume = NULL;
167 s->request_rewind = NULL;
168 s->update_requested_latency = NULL;
170 s->get_formats = NULL;
171 s->set_formats = NULL;
172 s->reconfigure = NULL;
175 /* Called from main context */
/* Create and register a new sink from the fully filled-in 'data'.
 *
 * Sequence visible in this extract:
 *   1. register the name with the name registry and fire the SINK_NEW hook;
 *   2. validate/fixate the new-data fields (UTF-8 names, sample spec,
 *      channel map, volume) and fill in defaults where unset;
 *   3. fire the SINK_FIXATE hook, then copy everything into the new
 *      pa_sink object and initialize both main-thread and thread_info
 *      state;
 *   4. create the companion monitor source ("<name>.monitor") that taps
 *      this sink's output.
 * Returns the new sink, or NULL on validation/hook failure (error paths
 * are partly elided here — see the FIXME about leaking 's' on failure). */
176 pa_sink* pa_sink_new(
178 pa_sink_new_data *data,
179 pa_sink_flags_t flags) {
183 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
184 pa_source_new_data source_data;
190 pa_assert(data->name);
191 pa_assert_ctl_context();
193 s = pa_msgobject_new(pa_sink);
195 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
196 pa_log_debug("Failed to register name %s.", data->name);
/* The registry may have mangled the requested name; store the final one. */
201 pa_sink_new_data_set_name(data, name);
203 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
205 pa_namereg_unregister(core, name);
209 /* FIXME, need to free s here on failure */
/* Validate the (possibly hook-modified) new-data before committing. */
211 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
212 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
214 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
216 if (!data->channel_map_is_set)
217 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT))
219 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
220 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
222 /* FIXME: There should probably be a general function for checking whether
223 * the sink volume is allowed to be set, like there is for sink inputs. */
224 pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
226 if (!data->volume_is_set) {
227 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
228 data->save_volume = false;
231 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
232 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
234 if (!data->muted_is_set)
/* Inherit card properties and fill in description/icon/role defaults. */
238 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
240 pa_device_init_description(data->proplist, data->card);
241 pa_device_init_icon(data->proplist, true);
242 pa_device_init_intended_roles(data->proplist);
244 if (!data->active_port) {
245 pa_device_port *p = pa_device_port_find_best(data->ports);
247 pa_sink_new_data_set_port(data, p->name);
250 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
252 pa_namereg_unregister(core, name);
/* Commit: populate the pa_sink object from the fixated data. */
256 s->parent.parent.free = sink_free;
257 s->parent.process_msg = pa_sink_process_msg;
260 s->state = PA_SINK_INIT;
263 s->suspend_cause = data->suspend_cause;
264 s->name = pa_xstrdup(name);
265 s->proplist = pa_proplist_copy(data->proplist);
266 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
267 s->module = data->module;
268 s->card = data->card;
270 s->priority = pa_device_init_priority(s->proplist);
272 s->sample_spec = data->sample_spec;
273 s->channel_map = data->channel_map;
274 s->default_sample_rate = s->sample_spec.rate;
276 if (data->alternate_sample_rate_is_set)
277 s->alternate_sample_rate = data->alternate_sample_rate;
279 s->alternate_sample_rate = s->core->alternate_sample_rate;
281 if (data->avoid_resampling_is_set)
282 s->avoid_resampling = data->avoid_resampling;
284 s->avoid_resampling = s->core->avoid_resampling;
286 s->inputs = pa_idxset_new(NULL, NULL);
288 s->input_to_master = NULL;
290 s->reference_volume = s->real_volume = data->volume;
291 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
292 s->base_volume = PA_VOLUME_NORM;
293 s->n_volume_steps = PA_VOLUME_NORM+1;
294 s->muted = data->muted;
295 s->refresh_volume = s->refresh_muted = false;
302 /* As a minor optimization we just steal the list instead of
304 s->ports = data->ports;
307 s->active_port = NULL;
308 s->save_port = false;
310 if (data->active_port)
311 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
312 s->save_port = data->save_port;
314 /* Hopefully the active port has already been assigned in the previous call
315 to pa_device_port_find_best, but better safe than sorry */
317 s->active_port = pa_device_port_find_best(s->ports);
320 s->port_latency_offset = s->active_port->latency_offset;
322 s->port_latency_offset = 0;
324 s->save_volume = data->save_volume;
325 s->save_muted = data->save_muted;
327 pa_silence_memchunk_get(
328 &core->silence_cache,
/* Initialize the IO-thread-owned mirror state (thread_info). */
334 s->thread_info.rtpoll = NULL;
335 s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
336 (pa_free_cb_t) pa_sink_input_unref);
337 s->thread_info.soft_volume = s->soft_volume;
338 s->thread_info.soft_muted = s->muted;
339 s->thread_info.state = s->state;
340 s->thread_info.rewind_nbytes = 0;
341 s->thread_info.rewind_requested = false;
342 s->thread_info.max_rewind = 0;
343 s->thread_info.max_request = 0;
344 s->thread_info.requested_latency_valid = false;
345 s->thread_info.requested_latency = 0;
346 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
347 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
348 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
350 PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
351 s->thread_info.volume_changes_tail = NULL;
352 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
353 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
354 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
355 s->thread_info.port_latency_offset = s->port_latency_offset;
357 /* FIXME: This should probably be moved to pa_sink_put() */
358 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
361 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
363 pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
364 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
367 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
368 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Create the monitor source that mirrors this sink's output stream. */
372 pa_source_new_data_init(&source_data);
373 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
374 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
375 pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
376 pa_source_new_data_set_avoid_resampling(&source_data, s->avoid_resampling);
377 source_data.name = pa_sprintf_malloc("%s.monitor", name);
378 source_data.driver = data->driver;
379 source_data.module = data->module;
380 source_data.card = data->card;
382 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
383 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
384 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
386 s->monitor_source = pa_source_new(core, &source_data,
387 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
388 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
390 pa_source_new_data_done(&source_data);
392 if (!s->monitor_source) {
398 s->monitor_source->monitor_of = s;
/* Keep the monitor source's latency configuration in sync with ours. */
400 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
401 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
402 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
407 /* Called from main context */
/* Move the sink to 'state' with the given suspend cause, notifying the
 * IO thread (via the driver's set_state callbacks and/or a SET_STATE
 * message), the sink inputs, and subscribers. No-op when neither the
 * state nor the suspend cause changes. Returns 0 on success; a negative
 * error is only possible when resuming fails, in which case the sink is
 * parked in SUSPENDED with a cleared suspend cause. */
408 static int sink_set_state(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) {
411 bool suspend_cause_changed;
414 pa_sink_state_t old_state;
415 pa_suspend_cause_t old_suspend_cause;
418 pa_assert_ctl_context();
420 state_changed = state != s->state;
421 suspend_cause_changed = suspend_cause != s->suspend_cause;
423 if (!state_changed && !suspend_cause_changed)
426 suspending = PA_SINK_IS_OPENED(s->state) && state == PA_SINK_SUSPENDED;
427 resuming = s->state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state);
429 /* If we are resuming, suspend_cause must be 0. */
430 pa_assert(!resuming || !suspend_cause);
432 /* Here's something to think about: what to do with the suspend cause if
433 * resuming the sink fails? The old suspend cause will be incorrect, so we
434 * can't use that. On the other hand, if we set no suspend cause (as is the
435 * case currently), then it looks strange to have a sink suspended without
436 * any cause. It might be a good idea to add a new "resume failed" suspend
437 * cause, or it might just add unnecessary complexity, given that the
438 * current approach of not setting any suspend cause works well enough. */
/* First give the main-thread driver callback a chance to veto/perform
 * the transition. */
440 if (s->set_state_in_main_thread) {
441 if ((ret = s->set_state_in_main_thread(s, state, suspend_cause)) < 0) {
442 /* set_state_in_main_thread() is allowed to fail only when resuming. */
445 /* If resuming fails, we set the state to SUSPENDED and
446 * suspend_cause to 0. */
447 state = PA_SINK_SUSPENDED;
449 state_changed = false;
450 suspend_cause_changed = suspend_cause != s->suspend_cause;
453 /* We know the state isn't changing. If the suspend cause isn't
454 * changing either, then there's nothing more to do. */
455 if (!suspend_cause_changed)
/* Then tell the IO thread about the new state synchronously. */
461 struct set_state_data data = { .state = state, .suspend_cause = suspend_cause };
463 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, &data, 0, NULL)) < 0) {
464 /* SET_STATE is allowed to fail only when resuming. */
/* Roll back the main-thread callback's resume, since the IO side
 * refused. */
467 if (s->set_state_in_main_thread)
468 s->set_state_in_main_thread(s, PA_SINK_SUSPENDED, 0);
470 /* If resuming fails, we set the state to SUSPENDED and
471 * suspend_cause to 0. */
472 state = PA_SINK_SUSPENDED;
474 state_changed = false;
475 suspend_cause_changed = suspend_cause != s->suspend_cause;
478 /* We know the state isn't changing. If the suspend cause isn't
479 * changing either, then there's nothing more to do. */
480 if (!suspend_cause_changed)
485 old_suspend_cause = s->suspend_cause;
486 if (suspend_cause_changed) {
487 char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
488 char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
490 pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(s->suspend_cause, old_cause_buf),
491 pa_suspend_cause_to_string(suspend_cause, new_cause_buf));
492 s->suspend_cause = suspend_cause;
495 old_state = s->state;
497 pa_log_debug("%s: state: %s -> %s", s->name, pa_sink_state_to_string(s->state), pa_sink_state_to_string(state));
500 /* If we enter UNLINKED state, then we don't send change notifications.
501 * pa_sink_unlink() will send unlink notifications instead. */
502 if (state != PA_SINK_UNLINKED) {
503 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
504 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
508 if (suspending || resuming || suspend_cause_changed) {
512 /* We're suspending or resuming, tell everyone about it */
514 PA_IDXSET_FOREACH(i, s->inputs, idx)
/* Inputs flagged KILL_ON_SUSPEND are terminated instead of being
 * notified about the suspension. */
515 if (s->state == PA_SINK_SUSPENDED &&
516 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
517 pa_sink_input_kill(i);
519 i->suspend(i, old_state, old_suspend_cause);
522 if ((suspending || resuming || suspend_cause_changed) && s->monitor_source && state != PA_SINK_UNLINKED)
523 pa_source_sync_suspend(s->monitor_source);
/* Install the driver callback used to read the hardware volume
 * (body elided in this extract). */
528 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Install (or clear, cb == NULL) the driver callback that applies volume
 * to hardware, toggling PA_SINK_HW_VOLUME_CTRL accordingly and updating
 * decibel-volume support. Fires a change event if the flags changed
 * after the sink left INIT state. */
534 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
535 pa_sink_flags_t flags;
/* write_volume (deferred volume) requires a set_volume callback. */
538 pa_assert(!s->write_volume || cb);
542 /* Save the current flags so we can tell if they've changed */
546 /* The sink implementor is responsible for setting decibel volume support */
547 s->flags |= PA_SINK_HW_VOLUME_CTRL;
549 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
550 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
551 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
554 /* If the flags have changed after init, let any clients know via a change event */
555 if (s->state != PA_SINK_INIT && flags != s->flags)
556 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install (or clear) the deferred-volume write callback, toggling
 * PA_SINK_DEFERRED_VOLUME. Requires set_volume to be installed first.
 * Fires a change event if the flags changed after INIT. */
559 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
560 pa_sink_flags_t flags;
563 pa_assert(!cb || s->set_volume);
565 s->write_volume = cb;
567 /* Save the current flags so we can tell if they've changed */
571 s->flags |= PA_SINK_DEFERRED_VOLUME;
573 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
575 /* If the flags have changed after init, let any clients know via a change event */
576 if (s->state != PA_SINK_INIT && flags != s->flags)
577 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install the driver callback used to read the hardware mute state
 * (body elided in this extract). */
580 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
/* Install (or clear) the driver callback that applies mute to hardware,
 * toggling PA_SINK_HW_MUTE_CTRL. Fires a change event if the flags
 * changed after INIT. */
586 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
587 pa_sink_flags_t flags;
593 /* Save the current flags so we can tell if they've changed */
597 s->flags |= PA_SINK_HW_MUTE_CTRL;
599 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
601 /* If the flags have changed after init, let any clients know via a change event */
602 if (s->state != PA_SINK_INIT && flags != s->flags)
603 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable or disable PA_SINK_FLAT_VOLUME on the sink. The request is
 * additionally gated on the user's global flat_volumes setting. Fires a
 * change event if the flags changed after INIT. */
606 static void enable_flat_volume(pa_sink *s, bool enable) {
607 pa_sink_flags_t flags;
611 /* Always follow the overall user preference here */
612 enable = enable && s->core->flat_volumes;
614 /* Save the current flags so we can tell if they've changed */
618 s->flags |= PA_SINK_FLAT_VOLUME;
620 s->flags &= ~PA_SINK_FLAT_VOLUME;
622 /* If the flags have changed after init, let any clients know via a change event */
623 if (s->state != PA_SINK_INIT && flags != s->flags)
624 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable or disable PA_SINK_DECIBEL_VOLUME, propagating the same setting
 * to flat-volume support (flat volumes only make sense with dB volumes).
 * Fires a change event if the flags changed after INIT. */
627 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
628 pa_sink_flags_t flags;
632 /* Save the current flags so we can tell if they've changed */
636 s->flags |= PA_SINK_DECIBEL_VOLUME;
637 enable_flat_volume(s, true);
639 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
640 enable_flat_volume(s, false);
643 /* If the flags have changed after init, let any clients know via a change event */
644 if (s->state != PA_SINK_INIT && flags != s->flags)
645 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
648 /* Called from main context */
/* Finish sink initialization after the driver has filled in callbacks
 * and flags between pa_sink_new() and here: validate the flag/callback
 * combinations, finalize volume handling (decibel/flat/shared volume),
 * move the sink from INIT to IDLE (or SUSPENDED if a suspend cause is
 * already set), put the monitor source, and announce the sink via
 * subscription events and the SINK_PUT hook. */
649 void pa_sink_put(pa_sink* s) {
650 pa_sink_assert_ref(s);
651 pa_assert_ctl_context();
653 pa_assert(s->state == PA_SINK_INIT);
654 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
656 /* The following fields must be initialized properly when calling _put() */
657 pa_assert(s->asyncmsgq);
658 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
660 /* Generally, flags should be initialized via pa_sink_new(). As a
661 * special exception we allow some volume related flags to be set
662 * between _new() and _put() by the callback setter functions above.
664 * Thus we implement a couple safeguards here which ensure the above
665 * setters were used (or at least the implementor made manual changes
666 * in a compatible way).
668 * Note: All of these flags set here can change over the life time
670 pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
671 pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
672 pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
674 /* XXX: Currently decibel volume is disabled for all sinks that use volume
675 * sharing. When the master sink supports decibel volume, it would be good
676 * to have the flag also in the filter sink, but currently we don't do that
677 * so that the flags of the filter sink never change when it's moved from
678 * a master sink to another. One solution for this problem would be to
679 * remove user-visible volume altogether from filter sinks when volume
680 * sharing is used, but the current approach was easier to implement... */
681 /* We always support decibel volumes in software, otherwise we leave it to
682 * the sink implementor to set this flag as needed.
684 * Note: This flag can also change over the life time of the sink. */
685 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
686 pa_sink_enable_decibel_volume(s, true);
687 s->soft_volume = s->reference_volume;
690 /* If the sink implementor support DB volumes by itself, we should always
691 * try and enable flat volumes too */
692 if ((s->flags & PA_SINK_DECIBEL_VOLUME))
693 enable_flat_volume(s, true);
695 if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
696 pa_sink *root_sink = pa_sink_get_master(s);
698 pa_assert(root_sink);
/* Filter sinks with shared volume mirror the master's volumes,
 * remapped into this sink's channel map. */
700 s->reference_volume = root_sink->reference_volume;
701 pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
703 s->real_volume = root_sink->real_volume;
704 pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
706 /* We assume that if the sink implementor changed the default
707 * volume they did so in real_volume, because that is the usual
708 * place where they are supposed to place their changes. */
709 s->reference_volume = s->real_volume;
711 s->thread_info.soft_volume = s->soft_volume;
712 s->thread_info.soft_muted = s->muted;
713 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
/* Sanity-check the final flag/latency configuration against the
 * monitor source before going live. */
715 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
716 || (s->base_volume == PA_VOLUME_NORM
717 && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
718 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
719 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
720 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
721 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
723 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
724 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
725 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
727 if (s->suspend_cause)
728 pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED, s->suspend_cause) == 0);
730 pa_assert_se(sink_set_state(s, PA_SINK_IDLE, 0) == 0);
732 pa_source_put(s->monitor_source);
734 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
735 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
737 /* It's good to fire the SINK_PUT hook before updating the default sink,
738 * because module-switch-on-connect will set the new sink as the default
739 * sink, and if we were to call pa_core_update_default_sink() before that,
740 * the default sink might change twice, causing unnecessary stream moving. */
742 pa_core_update_default_sink(s->core);
744 pa_core_move_streams_to_newly_available_preferred_sink(s->core, s);
747 /* Called from main context */
/* Disconnect the sink from the core: fire the UNLINK hook, unregister
 * the name, remove the sink from core/card idxsets, rescue or kill all
 * sink inputs, transition to UNLINKED, unlink the monitor source, and
 * post the REMOVE event plus the UNLINK_POST hook. Idempotent via the
 * unlink_requested guard. Undoes registrations made in pa_sink_new() as
 * well as pa_sink_put(). */
748 void pa_sink_unlink(pa_sink* s) {
750 pa_sink_input *i, PA_UNUSED *j = NULL;
752 pa_sink_assert_ref(s);
753 pa_assert_ctl_context();
755 /* Please note that pa_sink_unlink() does more than simply
756 * reversing pa_sink_put(). It also undoes the registrations
757 * already done in pa_sink_new()! */
759 if (s->unlink_requested)
762 s->unlink_requested = true;
764 linked = PA_SINK_IS_LINKED(s->state);
767 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
769 if (s->state != PA_SINK_UNLINKED)
770 pa_namereg_unregister(s->core, s->name);
771 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
773 pa_core_update_default_sink(s->core);
/* Try to move streams to the default sink before killing leftovers. */
775 if (linked && s->core->rescue_streams)
776 pa_sink_move_streams_to_default_sink(s->core, s, false);
779 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
781 while ((i = pa_idxset_first(s->inputs, NULL))) {
783 pa_sink_input_kill(i);
788 /* It's important to keep the suspend cause unchanged when unlinking,
789 * because if we remove the SESSION suspend cause here, the alsa sink
790 * will sync its volume with the hardware while another user is
791 * active, messing up the volume for that other user. */
792 sink_set_state(s, PA_SINK_UNLINKED, s->suspend_cause);
794 s->state = PA_SINK_UNLINKED;
798 if (s->monitor_source)
799 pa_source_unlink(s->monitor_source);
802 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
803 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
807 /* Called from main context */
/* Destructor invoked when the last reference is dropped: flushes pending
 * deferred volume changes, unrefs the monitor source, and frees the
 * input containers, silence memblock, proplist and ports. Must only run
 * on an unlinked, zero-refcount sink. */
808 static void sink_free(pa_object *o) {
809 pa_sink *s = PA_SINK(o);
812 pa_assert_ctl_context();
813 pa_assert(pa_sink_refcnt(s) == 0);
814 pa_assert(!PA_SINK_IS_LINKED(s->state));
816 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
818 pa_sink_volume_change_flush(s);
820 if (s->monitor_source) {
821 pa_source_unref(s->monitor_source);
822 s->monitor_source = NULL;
825 pa_idxset_free(s->inputs, NULL);
826 pa_hashmap_free(s->thread_info.inputs);
828 if (s->silence.memblock)
829 pa_memblock_unref(s->silence.memblock);
835 pa_proplist_free(s->proplist);
838 pa_hashmap_free(s->ports);
843 /* Called from main context, and not while the IO thread is active, please */
/* Attach the async message queue used for main<->IO thread messaging,
 * and propagate it to the monitor source. */
844 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
845 pa_sink_assert_ref(s);
846 pa_assert_ctl_context();
850 if (s->monitor_source)
851 pa_source_set_asyncmsgq(s->monitor_source, q);
854 /* Called from main context, and not while the IO thread is active, please */
/* Update a restricted subset of sink flags (only LATENCY and
 * DYNAMIC_LATENCY may change). On an actual change: log it, post a
 * change event, fire the FLAGS_CHANGED hook, mirror the change onto the
 * monitor source, and recurse into any filter sinks stacked on top of
 * this one (via each input's origin_sink). */
855 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
856 pa_sink_flags_t old_flags;
857 pa_sink_input *input;
860 pa_sink_assert_ref(s);
861 pa_assert_ctl_context();
863 /* For now, allow only a minimal set of flags to be changed. */
864 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
866 old_flags = s->flags;
867 s->flags = (s->flags & ~mask) | (value & mask);
869 if (s->flags == old_flags)
872 if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
873 pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
875 if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
876 pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
877 s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
879 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
880 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
882 if (s->monitor_source)
883 pa_source_update_flags(s->monitor_source,
884 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
885 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
886 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
887 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
/* Propagate to filter sinks layered on top of this sink. */
889 PA_IDXSET_FOREACH(input, s->inputs, idx) {
890 if (input->origin_sink)
891 pa_sink_update_flags(input->origin_sink, mask, value);
895 /* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object used by the IO thread, and propagate it to
 * the monitor source. */
896 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
897 pa_sink_assert_ref(s);
898 pa_sink_assert_io_context(s);
900 s->thread_info.rtpoll = p;
902 if (s->monitor_source)
903 pa_source_set_rtpoll(s->monitor_source, p);
906 /* Called from main context */
/* Re-evaluate RUNNING vs IDLE based on whether anything currently uses
 * the sink. Suspended sinks are left untouched. */
907 int pa_sink_update_status(pa_sink*s) {
908 pa_sink_assert_ref(s);
909 pa_assert_ctl_context();
910 pa_assert(PA_SINK_IS_LINKED(s->state));
912 if (s->state == PA_SINK_SUSPENDED)
915 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
918 /* Called from main context */
/* Add or remove 'cause' from the sink's suspend-cause bitmask. The sink
 * is suspended while any cause remains set, and resumed (to RUNNING or
 * IDLE depending on usage) once the merged cause becomes empty. */
919 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
920 pa_suspend_cause_t merged_cause;
922 pa_sink_assert_ref(s);
923 pa_assert_ctl_context();
924 pa_assert(PA_SINK_IS_LINKED(s->state));
925 pa_assert(cause != 0);
928 merged_cause = s->suspend_cause | cause;
930 merged_cause = s->suspend_cause & ~cause;
933 return sink_set_state(s, PA_SINK_SUSPENDED, merged_cause);
935 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
938 /* Called from main context */
/* Begin moving all inputs away from this sink: each input that
 * successfully starts its move is ref'd and queued; those that refuse
 * are unref'd and left in place. Returns the queue for a later
 * pa_sink_move_all_finish()/_fail(). */
939 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
940 pa_sink_input *i, *n;
943 pa_sink_assert_ref(s);
944 pa_assert_ctl_context();
945 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the successor before each move, since moving detaches 'i'
 * from s->inputs. */
950 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
951 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
953 pa_sink_input_ref(i);
955 if (pa_sink_input_start_move(i) >= 0)
958 pa_sink_input_unref(i);
964 /* Called from main context */
/* Complete a move started with pa_sink_move_all_start(): attach each
 * queued input to sink 's' (falling back to fail_move on error), drop
 * the reference taken at start, and free the queue. */
965 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
968 pa_sink_assert_ref(s);
969 pa_assert_ctl_context();
970 pa_assert(PA_SINK_IS_LINKED(s->state));
973 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
974 if (PA_SINK_INPUT_IS_LINKED(i->state)) {
975 if (pa_sink_input_finish_move(i, s, save) < 0)
976 pa_sink_input_fail_move(i);
979 pa_sink_input_unref(i);
982 pa_queue_free(q, NULL);
985 /* Called from main context */
/* Abort a move started with pa_sink_move_all_start(): fail the move for
 * every queued input, drop the start reference, and free the queue. */
986 void pa_sink_move_all_fail(pa_queue *q) {
989 pa_assert_ctl_context();
992 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
993 pa_sink_input_fail_move(i);
994 pa_sink_input_unref(i);
997 pa_queue_free(q, NULL);
1000 /* Called from IO thread context */
/* Scan all inputs for underruns, recursing into filter sinks (via each
 * input's origin_sink) with sizes converted between the two sinks'
 * sample specs. 'result' tracks the longest underrun seen; the function
 * returns left_to_play minus that longest underrun (i.e. how much
 * playback-buffer headroom remains). Inputs whose underrun covers the
 * whole remaining playback buffer get pa_sink_input_process_underrun(). */
1001 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
1006 pa_sink_assert_ref(s);
1007 pa_sink_assert_io_context(s);
1009 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1010 size_t uf = i->thread_info.underrun_for_sink;
1012 /* Propagate down the filter tree */
1013 if (i->origin_sink) {
1014 size_t filter_result, left_to_play_origin;
1016 /* The recursive call works in the origin sink domain ... */
1017 left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
1019 /* .. and returns the time to sleep before waking up. We need the
1020 * underrun duration for comparisons, so we undo the subtraction on
1021 * the return value... */
1022 filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
1024 /* ... and convert it back to the master sink domain */
1025 filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
1027 /* Remember the longest underrun so far */
1028 if (filter_result > result)
1029 result = filter_result;
1033 /* No underrun here, move on */
1035 } else if (uf >= left_to_play) {
1036 /* The sink has possibly consumed all the data the sink input provided */
1037 pa_sink_input_process_underrun(i);
1038 } else if (uf > result) {
1039 /* Remember the longest underrun so far */
1045 pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
1046 (long) result, (long) left_to_play - result);
1047 return left_to_play - result;
1050 /* Called from IO thread context */
/* Execute a rewind of 'nbytes' on the render pipeline: reset the rewind
 * request state, rewind the deferred volume changes if applicable,
 * forward the rewind to every sink input, and finally to the monitor
 * source. Short-circuits when no rewind was requested and nbytes is 0. */
1051 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1055 pa_sink_assert_ref(s);
1056 pa_sink_assert_io_context(s);
1057 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1059 /* If nobody requested this and this is actually no real rewind
1060 * then we can short cut this. Please note that this means that
1061 * not all rewind requests triggered upstream will always be
1062 * translated in actual requests! */
1063 if (!s->thread_info.rewind_requested && nbytes <= 0)
1066 s->thread_info.rewind_nbytes = 0;
1067 s->thread_info.rewind_requested = false;
1070 pa_log_debug("Processing rewind...");
1071 if (s->flags & PA_SINK_DEFERRED_VOLUME)
1072 pa_sink_volume_change_rewind(s, nbytes);
1075 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1076 pa_sink_input_assert_ref(i);
1077 pa_sink_input_process_rewind(i, nbytes);
1081 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1082 pa_source_process_rewind(s->monitor_source, nbytes);
1086 /* Called from IO thread context */
1087 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
     /* Peek one chunk from each connected sink input (at most maxinfo of
      * them) into info[], and shrink *length to the largest amount of
      * data available from all of them.  All-silence chunks are released
      * immediately so the mixer can skip them.  Returns the number of
      * entries filled in (return statement not visible in this view). */
1091 size_t mixlength = *length;
1093 pa_sink_assert_ref(s);
1094 pa_sink_assert_io_context(s);
1097 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1098 pa_sink_input_assert_ref(i);
1100 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
     /* Track the shortest chunk seen so far; it bounds the mix length. */
1102 if (mixlength == 0 || info->chunk.length < mixlength)
1103 mixlength = info->chunk.length;
     /* Silent chunks contribute nothing to the mix -- drop the ref. */
1105 if (pa_memblock_is_silence(info->chunk.memblock)) {
1106 pa_memblock_unref(info->chunk.memblock);
     /* Keep a reference to the input so inputs_drop() can match and
      * release it later. */
1110 info->userdata = pa_sink_input_ref(i);
1112 pa_assert(info->chunk.memblock);
1113 pa_assert(info->chunk.length > 0);
     /* Report the common length back to the caller. */
1121 *length = mixlength;
1126 /* Called from IO thread context */
1127 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
     /* After rendering: advance every sink input by result->length bytes,
      * feed per-input audio to any direct monitor outputs, and drop the
      * references fill_mix_info() took on inputs and memblocks. */
1131 unsigned n_unreffed = 0;
1133 pa_sink_assert_ref(s);
1134 pa_sink_assert_io_context(s);
1136 pa_assert(result->memblock);
1137 pa_assert(result->length > 0);
1139 /* We optimize for the case where the order of the inputs has not changed */
1141 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1143 pa_mix_info* m = NULL;
1145 pa_sink_input_assert_ref(i);
1147 /* Let's try to find the matching entry in the pa_mix_info array */
1148 for (j = 0; j < n; j ++) {
1150 if (info[p].userdata == i) {
1160 /* Drop read data */
1161 pa_sink_input_drop(i, result->length);
1163 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
1165 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1166 void *ostate = NULL;
1167 pa_source_output *o;
     /* Direct outputs get this input's own chunk (volume applied),
      * not the fully mixed result. */
1170 if (m && m->chunk.memblock) {
1172 pa_memblock_ref(c.memblock);
1173 pa_assert(result->length <= c.length);
1174 c.length = result->length;
1176 pa_memchunk_make_writable(&c, 0);
1177 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
     /* NOTE(review): the else-branch assignment of c happens on a line
      * not shown here -- presumably the mixed result; verify. */
1180 pa_memblock_ref(c.memblock);
1181 pa_assert(result->length <= c.length);
1182 c.length = result->length;
1185 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1186 pa_source_output_assert_ref(o);
1187 pa_assert(o->direct_on_input == i);
1188 pa_source_post_direct(s->monitor_source, o, &c);
1191 pa_memblock_unref(c.memblock);
     /* Release the references taken in fill_mix_info() for this entry. */
1196 if (m->chunk.memblock) {
1197 pa_memblock_unref(m->chunk.memblock);
1198 pa_memchunk_reset(&m->chunk);
1201 pa_sink_input_unref(m->userdata);
1208 /* Now drop references to entries that are included in the
1209 * pa_mix_info array but don't exist anymore */
1211 if (n_unreffed < n) {
1212 for (; n > 0; info++, n--) {
1214 pa_sink_input_unref(info->userdata);
1215 if (info->chunk.memblock)
1216 pa_memblock_unref(info->chunk.memblock);
     /* Finally feed the mixed result to the monitor source. */
1220 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1221 pa_source_post(s->monitor_source, result);
1224 /* Called from IO thread context */
1225 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
     /* Render up to length bytes of mixed audio from all inputs into
      * result.  Fast paths: a suspended sink yields silence, zero live
      * inputs yield cached silence, and a single input is passed through
      * with only a volume adjustment; the general case runs pa_mix(). */
1226 pa_mix_info info[MAX_MIX_CHANNELS];
1228 size_t block_size_max;
1230 pa_sink_assert_ref(s);
1231 pa_sink_assert_io_context(s);
1232 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1233 pa_assert(pa_frame_aligned(length, &s->sample_spec));
     /* Rendering while a rewind is pending would produce stale data. */
1236 pa_assert(!s->thread_info.rewind_requested);
1237 pa_assert(s->thread_info.rewind_nbytes == 0);
     /* Suspended: hand out a reference to the shared silence block. */
1239 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1240 result->memblock = pa_memblock_ref(s->silence.memblock);
1241 result->index = s->silence.index;
1242 result->length = PA_MIN(s->silence.length, length);
     /* Pick a default render size (the guard for this assignment is on a
      * line not visible here -- presumably length == 0). */
1249 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
     /* Never render more than one mempool block can hold. */
1251 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1252 if (length > block_size_max)
1253 length = pa_frame_align(block_size_max, &s->sample_spec);
1255 pa_assert(length > 0);
1257 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
     /* No live inputs: return (a reference to) cached silence. */
1261 *result = s->silence;
1262 pa_memblock_ref(result->memblock);
1264 if (result->length > length)
1265 result->length = length;
1267 } else if (n == 1) {
     /* Single input: reuse its chunk, applying soft volume/mute. */
1270 *result = info[0].chunk;
1271 pa_memblock_ref(result->memblock);
1273 if (result->length > length)
1274 result->length = length;
1276 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1278 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1279 pa_memblock_unref(result->memblock);
1280 pa_silence_memchunk_get(&s->core->silence_cache,
1285 } else if (!pa_cvolume_is_norm(&volume)) {
     /* Copy-on-write before scaling so we don't clobber shared data. */
1286 pa_memchunk_make_writable(result, 0);
1287 pa_volume_memchunk(result, &s->sample_spec, &volume);
     /* General case: mix all inputs into a freshly allocated block. */
1291 result->memblock = pa_memblock_new(s->core->mempool, length);
1293 ptr = pa_memblock_acquire(result->memblock);
1294 result->length = pa_mix(info, n,
1297 &s->thread_info.soft_volume,
1298 s->thread_info.soft_muted);
1299 pa_memblock_release(result->memblock);
     /* Advance all inputs past the data we consumed and drop refs. */
1304 inputs_drop(s, info, n, result);
1309 /* Called from IO thread context */
1310 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
     /* Like pa_sink_render(), but renders into a caller-provided chunk.
      * target->length may be shortened if less data is available. */
1311 pa_mix_info info[MAX_MIX_CHANNELS];
1313 size_t length, block_size_max;
1315 pa_sink_assert_ref(s);
1316 pa_sink_assert_io_context(s);
1317 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1319 pa_assert(target->memblock);
1320 pa_assert(target->length > 0);
1321 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1323 pa_assert(!s->thread_info.rewind_requested);
1324 pa_assert(s->thread_info.rewind_nbytes == 0);
     /* Suspended sinks produce silence in place. */
1326 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1327 pa_silence_memchunk(target, &s->sample_spec);
     /* Clamp the render size to a single mempool block. */
1333 length = target->length;
1334 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1335 if (length > block_size_max)
1336 length = pa_frame_align(block_size_max, &s->sample_spec);
1338 pa_assert(length > 0);
1340 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
     /* No live inputs: silence the target. */
1343 if (target->length > length)
1344 target->length = length;
1346 pa_silence_memchunk(target, &s->sample_spec);
1347 } else if (n == 1) {
     /* Single input: copy its (volume-adjusted) chunk into target. */
1350 if (target->length > length)
1351 target->length = length;
1353 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1355 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1356 pa_silence_memchunk(target, &s->sample_spec);
1360 vchunk = info[0].chunk;
1361 pa_memblock_ref(vchunk.memblock);
1363 if (vchunk.length > length)
1364 vchunk.length = length;
     /* Apply the combined volume on a writable copy before copying. */
1366 if (!pa_cvolume_is_norm(&volume)) {
1367 pa_memchunk_make_writable(&vchunk, 0);
1368 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1371 pa_memchunk_memcpy(target, &vchunk);
1372 pa_memblock_unref(vchunk.memblock);
     /* General case: mix all inputs directly into target's memory. */
1378 ptr = pa_memblock_acquire(target->memblock);
1380 target->length = pa_mix(info, n,
1381 (uint8_t*) ptr + target->index, length,
1383 &s->thread_info.soft_volume,
1384 s->thread_info.soft_muted);
1386 pa_memblock_release(target->memblock);
     /* Advance all inputs past the consumed data and drop refs. */
1389 inputs_drop(s, info, n, target);
1394 /* Called from IO thread context */
1395 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
     /* Completely fill target with rendered audio: silence when
      * suspended, otherwise repeated pa_sink_render_into() calls until
      * the whole chunk is written.  NOTE(review): the loop bookkeeping
      * around the render call is on lines not visible here. */
1399 pa_sink_assert_ref(s);
1400 pa_sink_assert_io_context(s);
1401 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1403 pa_assert(target->memblock);
1404 pa_assert(target->length > 0);
1405 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1407 pa_assert(!s->thread_info.rewind_requested);
1408 pa_assert(s->thread_info.rewind_nbytes == 0);
1410 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1411 pa_silence_memchunk(target, &s->sample_spec);
1424 pa_sink_render_into(s, &chunk);
1433 /* Called from IO thread context */
1434 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
     /* Render exactly length bytes: start with pa_sink_render(), and if
      * that returned less, top up the remainder in place with
      * pa_sink_render_into_full(). */
1435 pa_sink_assert_ref(s);
1436 pa_sink_assert_io_context(s);
1437 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1438 pa_assert(length > 0);
1439 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1442 pa_assert(!s->thread_info.rewind_requested);
1443 pa_assert(s->thread_info.rewind_nbytes == 0);
1447 pa_sink_render(s, length, result);
     /* Short render: grow the block and fill the tail region. */
1449 if (result->length < length) {
1452 pa_memchunk_make_writable(result, length);
1454 chunk.memblock = result->memblock;
1455 chunk.index = result->index + result->length;
1456 chunk.length = length - result->length;
1458 pa_sink_render_into_full(s, &chunk);
1460 result->length = length;
1466 /* Called from main thread */
1467 void pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
     /* Negotiate and apply a new sample format/rate for the sink, based
      * on the requesting stream's spec, the sink's default and alternate
      * rates, and the avoid_resampling policy.  Suspends the sink around
      * the driver-level s->reconfigure() call and updates the monitor
      * source and corked inputs' resamplers afterwards. */
1468 pa_sample_spec desired_spec;
1469 uint32_t default_rate = s->default_sample_rate;
1470 uint32_t alternate_rate = s->alternate_sample_rate;
1473 bool default_rate_is_usable = false;
1474 bool alternate_rate_is_usable = false;
1475 bool avoid_resampling = s->avoid_resampling;
     /* Nothing to do if the requested spec is already in effect, or the
      * sink implementation cannot reconfigure at all. */
1477 if (pa_sample_spec_equal(spec, &s->sample_spec))
1480 if (!s->reconfigure)
1483 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1484 pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
     /* A running sink (or running monitor) must not change its spec. */
1488 if (PA_SINK_IS_RUNNING(s->state)) {
1489 pa_log_info("Cannot update sample spec, SINK_IS_RUNNING, will keep using %s and %u Hz",
1490 pa_sample_format_to_string(s->sample_spec.format), s->sample_spec.rate);
1494 if (s->monitor_source) {
1495 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1496 pa_log_info("Cannot update sample spec, monitor source is RUNNING")
1501 if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
     /* Start from the current spec and adjust format/rate below. */
1504 desired_spec = s->sample_spec;
1507 /* We have to try to use the sink input format and rate */
1508 desired_spec.format = spec->format;
1509 desired_spec.rate = spec->rate;
1511 } else if (avoid_resampling) {
1512 /* We just try to set the sink input's sample rate if it's not too low */
1513 if (spec->rate >= default_rate || spec->rate >= alternate_rate)
1514 desired_spec.rate = spec->rate;
1515 desired_spec.format = spec->format;
1517 } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
1518 /* We can directly try to use this rate */
1519 desired_spec.rate = spec->rate;
1523 if (desired_spec.rate != spec->rate) {
1524 /* See if we can pick a rate that results in less resampling effort */
     /* Rates in the same family (multiples of 11025 or of 4000) resample
      * into each other with less effort/artifacts. */
1525 if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
1526 default_rate_is_usable = true;
1527 if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
1528 default_rate_is_usable = true;
1529 if (alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
1530 alternate_rate_is_usable = true;
1531 if (alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
1532 alternate_rate_is_usable = true;
1534 if (alternate_rate_is_usable && !default_rate_is_usable)
1535 desired_spec.rate = alternate_rate;
1537 desired_spec.rate = default_rate;
     /* Bail out if the negotiation ended up with what we already have. */
1540 if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_sink_is_passthrough(s))
1543 if (!passthrough && pa_sink_used_by(s) > 0)
1546 pa_log_debug("Suspending sink %s due to changing format, desired format = %s rate = %u",
1547 s->name, pa_sample_format_to_string(desired_spec.format), desired_spec.rate);
1548 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
     /* Driver-specific reconfiguration hook. */
1550 s->reconfigure(s, &desired_spec, passthrough);
1552 /* update monitor source as well */
1553 if (s->monitor_source && !passthrough)
1554 pa_source_reconfigure(s->monitor_source, &s->sample_spec, false);
1555 pa_log_info("Reconfigured successfully");
     /* Corked inputs won't notice the rate change while corked, so fix
      * their resamplers up-front. */
1557 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1558 if (i->state == PA_SINK_INPUT_CORKED)
1559 pa_sink_input_update_resampler(i);
1562 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1565 /* Called from main thread */
1566 pa_usec_t pa_sink_get_latency(pa_sink *s) {
     /* Query the sink's current playback latency via a synchronous
      * message to the IO thread, then add the port latency offset.
      * Suspended sinks and sinks without PA_SINK_LATENCY take early
      * exits (on lines not visible here). */
1569 pa_sink_assert_ref(s);
1570 pa_assert_ctl_context();
1571 pa_assert(PA_SINK_IS_LINKED(s->state));
1573 /* The returned value is supposed to be in the time domain of the sound card! */
1575 if (s->state == PA_SINK_SUSPENDED)
1578 if (!(s->flags & PA_SINK_LATENCY))
     /* Synchronously ask the IO thread for the device latency. */
1581 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1583 /* the return value is unsigned, so check that the offset can be added to usec without
     /* Only apply a negative offset when it doesn't underflow usec. */
1585 if (-s->port_latency_offset <= usec)
1586 usec += s->port_latency_offset;
1590 return (pa_usec_t)usec;
1593 /* Called from IO thread */
1594 int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
     /* IO-thread variant of the latency query: calls process_msg()
      * directly instead of going through the asyncmsgq.  May return a
      * negative value only when allow_negative is set. */
1598 pa_sink_assert_ref(s);
1599 pa_sink_assert_io_context(s);
1600 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1602 /* The returned value is supposed to be in the time domain of the sound card! */
1604 if (s->thread_info.state == PA_SINK_SUSPENDED)
1607 if (!(s->flags & PA_SINK_LATENCY))
1610 o = PA_MSGOBJECT(s);
1612 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1614 o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1616 /* If allow_negative is false, the call should only return positive values, */
1617 usec += s->thread_info.port_latency_offset;
     /* Clamp to zero when the caller can't handle negative latencies. */
1618 if (!allow_negative && usec < 0)
1624 /* Called from the main thread (and also from the IO thread while the main
1625 * thread is waiting).
1627 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1628 * set. Instead, flat volume mode is detected by checking whether the root sink
1629 * has the flag set. */
1630 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1631 pa_sink_assert_ref(s);
     /* Walk up to the root of the volume-sharing tree and test its flag. */
1633 s = pa_sink_get_master(s);
     /* NOTE(review): pa_sink_get_master() can return NULL; the NULL
      * handling here is on lines not visible in this view -- confirm. */
1636 return (s->flags & PA_SINK_FLAT_VOLUME);
1641 /* Called from the main thread (and also from the IO thread while the main
1642 * thread is waiting). */
1643 pa_sink *pa_sink_get_master(pa_sink *s) {
     /* Follow the chain of volume-sharing filter sinks up to the root
      * sink that actually owns the volume.  A broken chain (a sharing
      * sink without input_to_master) terminates the walk. */
1644 pa_sink_assert_ref(s);
1646 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1647 if (PA_UNLIKELY(!s->input_to_master))
1650 s = s->input_to_master->sink;
1656 /* Called from main context */
1657 bool pa_sink_is_filter(pa_sink *s) {
     /* A sink is a "filter" sink iff it sits on top of another sink,
      * i.e. it is connected through a sink input (input_to_master). */
1658 pa_sink_assert_ref(s);
1660 return (s->input_to_master != NULL);
1663 /* Called from main context */
1664 bool pa_sink_is_passthrough(pa_sink *s) {
     /* True iff the sink currently carries exactly one input and that
      * input is a passthrough stream (e.g. compressed formats). */
1665 pa_sink_input *alt_i;
1668 pa_sink_assert_ref(s);
1670 /* one and only one PASSTHROUGH input can possibly be connected */
1671 if (pa_idxset_size(s->inputs) == 1) {
1672 alt_i = pa_idxset_first(s->inputs, &idx);
1674 if (pa_sink_input_is_passthrough(alt_i))
1681 /* Called from main context */
1682 void pa_sink_enter_passthrough(pa_sink *s) {
     /* Switch the PA core objects to passthrough mode: suspend the
      * monitor source (monitoring compressed data is meaningless) and
      * force the volume to 0 dB, saving the previous volume so
      * pa_sink_leave_passthrough() can restore it. */
1685 /* The sink implementation is reconfigured for passthrough in
1686 * pa_sink_reconfigure(). This function sets the PA core objects to
1687 * passthrough mode. */
1689 /* disable the monitor in passthrough mode */
1690 if (s->monitor_source) {
1691 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1692 pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1695 /* set the volume to NORM */
1696 s->saved_volume = *pa_sink_get_volume(s, true);
1697 s->saved_save_volume = s->save_volume;
     /* Cap at base_volume so hardware amplification isn't engaged. */
1699 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1700 pa_sink_set_volume(s, &volume, true, false);
1702 pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
1705 /* Called from main context */
1706 void pa_sink_leave_passthrough(pa_sink *s) {
     /* Undo pa_sink_enter_passthrough(): resume the monitor source and
      * restore the volume saved before entering passthrough mode. */
1707 /* Unsuspend monitor */
1708 if (s->monitor_source) {
1709 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1710 pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1713 /* Restore sink volume to what it was before we entered passthrough mode */
1714 pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
     /* Clear the saved state so it can't be restored twice. */
1716 pa_cvolume_init(&s->saved_volume);
1717 s->saved_save_volume = false;
1721 /* Called from main context. */
1722 static void compute_reference_ratio(pa_sink_input *i) {
     /* Recompute one input's reference ratio (its volume relative to the
      * sink's reference volume) channel by channel, skipping channels
      * where an update would not change the product. */
1724 pa_cvolume remapped;
1728 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1731 * Calculates the reference ratio from the sink's reference
1732 * volume. This basically calculates:
1734 * i->reference_ratio = i->volume / i->sink->reference_volume
     /* Bring the sink volume into the input's channel map first. */
1737 remapped = i->sink->reference_volume;
1738 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1740 ratio = i->reference_ratio;
1742 for (c = 0; c < i->sample_spec.channels; c++) {
1744 /* We don't update when the sink volume is 0 anyway */
1745 if (remapped.values[c] <= PA_VOLUME_MUTED)
1748 /* Don't update the reference ratio unless necessary */
1749 if (pa_sw_volume_multiply(
1751 remapped.values[c]) == i->volume.values[c])
1754 ratio.values[c] = pa_sw_volume_divide(
1755 i->volume.values[c],
1756 remapped.values[c]);
     /* Publish the new ratio through the proper setter. */
1759 pa_sink_input_set_reference_ratio(i, &ratio);
1762 /* Called from main context. Only called for the root sink in volume sharing
1763 * cases, except for internal recursive calls. */
1764 static void compute_reference_ratios(pa_sink *s) {
     /* Recompute the reference ratio of every input of this sink, and
      * recurse into linked volume-sharing filter sinks. */
1768 pa_sink_assert_ref(s);
1769 pa_assert_ctl_context();
1770 pa_assert(PA_SINK_IS_LINKED(s->state));
1771 pa_assert(pa_sink_flat_volume_enabled(s));
1773 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1774 compute_reference_ratio(i);
     /* Recurse through inputs that are themselves filter sinks. */
1776 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1777 && PA_SINK_IS_LINKED(i->origin_sink->state))
1778 compute_reference_ratios(i->origin_sink);
1782 /* Called from main context. Only called for the root sink in volume sharing
1783 * cases, except for internal recursive calls. */
1784 static void compute_real_ratios(pa_sink *s) {
     /* Recompute every input's real ratio (volume relative to the sink's
      * real/hardware volume) and from it the input's soft volume.
      * Volume-sharing filter inputs are pinned to 0 dB and recursed. */
1788 pa_sink_assert_ref(s);
1789 pa_assert_ctl_context();
1790 pa_assert(PA_SINK_IS_LINKED(s->state));
1791 pa_assert(pa_sink_flat_volume_enabled(s));
1793 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1795 pa_cvolume remapped;
1797 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1798 /* The origin sink uses volume sharing, so this input's real ratio
1799 * is handled as a special case - the real ratio must be 0 dB, and
1800 * as a result i->soft_volume must equal i->volume_factor. */
1801 pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1802 i->soft_volume = i->volume_factor;
1804 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1805 compute_real_ratios(i->origin_sink);
1811 * This basically calculates:
1813 * i->real_ratio := i->volume / s->real_volume
1814 * i->soft_volume := i->real_ratio * i->volume_factor
     /* Bring the sink's real volume into the input's channel map. */
1817 remapped = s->real_volume;
1818 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1820 i->real_ratio.channels = i->sample_spec.channels;
1821 i->soft_volume.channels = i->sample_spec.channels;
1823 for (c = 0; c < i->sample_spec.channels; c++) {
     /* Muted sink channel: soft-mute the input channel, keep ratio. */
1825 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1826 /* We leave i->real_ratio untouched */
1827 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1831 /* Don't lose accuracy unless necessary */
1832 if (pa_sw_volume_multiply(
1833 i->real_ratio.values[c],
1834 remapped.values[c]) != i->volume.values[c])
1836 i->real_ratio.values[c] = pa_sw_volume_divide(
1837 i->volume.values[c],
1838 remapped.values[c]);
1840 i->soft_volume.values[c] = pa_sw_volume_multiply(
1841 i->real_ratio.values[c],
1842 i->volume_factor.values[c]);
1845 /* We don't copy the soft_volume to the thread_info data
1846 * here. That must be done by the caller */
1850 static pa_cvolume *cvolume_remap_minimal_impact(
     /* Remap v from channel map 'from' to 'to', preferring the caller's
      * template remapping when it round-trips back to v, and falling
      * back to a uniform (max) volume when the maps differ -- see the
      * explanatory comment below. */
1852 const pa_cvolume *template,
1853 const pa_channel_map *from,
1854 const pa_channel_map *to) {
1859 pa_assert(template);
1862 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1863 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1865 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1866 * mapping from sink input to sink volumes:
1868 * If template is a possible remapping from v it is used instead
1869 * of remapping anew.
1871 * If the channel maps don't match we set an all-channel volume on
1872 * the sink to ensure that changing a volume on one stream has no
1873 * effect that cannot be compensated for in another stream that
1874 * does not have the same channel map as the sink. */
     /* Identical maps: nothing to remap (early return on elided line). */
1876 if (pa_channel_map_equal(from, to))
     /* The template survives a round-trip back to 'from' -- use it. */
1880 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
     /* Fallback: flatten to the maximum channel volume. */
1885 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1889 /* Called from main thread. Only called for the root sink in volume sharing
1890 * cases, except for internal recursive calls. */
1891 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
     /* Merge (per-channel maximum) the volumes of all inputs of this
      * sink -- and, recursively, of linked volume-sharing filter sinks
      * -- into *max_volume, expressed in channel_map. */
1895 pa_sink_assert_ref(s);
1896 pa_assert(max_volume);
1897 pa_assert(channel_map);
1898 pa_assert(pa_sink_flat_volume_enabled(s));
1900 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1901 pa_cvolume remapped;
1903 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1904 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1905 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1907 /* Ignore this input. The origin sink uses volume sharing, so this
1908 * input's volume will be set to be equal to the root sink's real
1909 * volume. Obviously this input's current volume must not then
1910 * affect what the root sink's real volume will be. */
     /* Regular input: remap its volume with minimal impact and merge. */
1914 remapped = i->volume;
1915 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1916 pa_cvolume_merge(max_volume, max_volume, &remapped);
1920 /* Called from main thread. Only called for the root sink in volume sharing
1921 * cases, except for internal recursive calls. */
1922 static bool has_inputs(pa_sink *s) {
     /* True iff the sink (or any volume-sharing filter sink hanging off
      * it) has at least one "real" (non-filter) input. */
1926 pa_sink_assert_ref(s);
1928 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1929 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1936 /* Called from main thread. Only called for the root sink in volume sharing
1937 * cases, except for internal recursive calls. */
1938 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
     /* Store new_volume (given in channel_map) as this sink's real
      * volume, and push it down into any volume-sharing filter sinks and
      * their connecting inputs. */
1942 pa_sink_assert_ref(s);
1943 pa_assert(new_volume);
1944 pa_assert(channel_map);
1946 s->real_volume = *new_volume;
1947 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1949 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1950 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1951 if (pa_sink_flat_volume_enabled(s)) {
1952 pa_cvolume new_input_volume;
1954 /* Follow the root sink's real volume. */
1955 new_input_volume = *new_volume;
1956 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
1957 pa_sink_input_set_volume_direct(i, &new_input_volume);
     /* The input volume changed, so its reference ratio changed too. */
1958 compute_reference_ratio(i);
     /* Recurse into the filter sink itself. */
1961 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1962 update_real_volume(i->origin_sink, new_volume, channel_map);
1967 /* Called from main thread. Only called for the root sink in shared volume
1969 static void compute_real_volume(pa_sink *s) {
     /* In flat-volume mode, set the sink's real volume to the per-channel
      * maximum of all connected stream volumes, then recompute each
      * stream's real ratio / soft volume accordingly. */
1970 pa_sink_assert_ref(s);
1971 pa_assert_ctl_context();
1972 pa_assert(PA_SINK_IS_LINKED(s->state));
1973 pa_assert(pa_sink_flat_volume_enabled(s));
1974 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
1976 /* This determines the maximum volume of all streams and sets
1977 * s->real_volume accordingly. */
1979 if (!has_inputs(s)) {
1980 /* In the special case that we have no sink inputs we leave the
1981 * volume unmodified. */
1982 update_real_volume(s, &s->reference_volume, &s->channel_map);
     /* Start from silence and merge every stream volume into it. */
1986 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1988 /* First let's determine the new maximum volume of all inputs
1989 * connected to this sink */
1990 get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
1991 update_real_volume(s, &s->real_volume, &s->channel_map);
1993 /* Then, let's update the real ratios/soft volumes of all inputs
1994 * connected to this sink */
1995 compute_real_ratios(s);
1998 /* Called from main thread. Only called for the root sink in shared volume
1999 * cases, except for internal recursive calls. */
2000 static void propagate_reference_volume(pa_sink *s) {
     /* After a sink-level reference volume change, recompute every
      * stream's volume as reference_volume * reference_ratio, recursing
      * through volume-sharing filter sinks. */
2004 pa_sink_assert_ref(s);
2005 pa_assert_ctl_context();
2006 pa_assert(PA_SINK_IS_LINKED(s->state));
2007 pa_assert(pa_sink_flat_volume_enabled(s));
2009 /* This is called whenever the sink volume changes that is not
2010 * caused by a sink input volume change. We need to fix up the
2011 * sink input volumes accordingly */
2013 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2014 pa_cvolume new_volume;
2016 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2017 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2018 propagate_reference_volume(i->origin_sink);
2020 /* Since the origin sink uses volume sharing, this input's volume
2021 * needs to be updated to match the root sink's real volume, but
2022 * that will be done later in update_real_volume(). */
2026 /* This basically calculates:
2028 * i->volume := s->reference_volume * i->reference_ratio */
2030 new_volume = s->reference_volume;
2031 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2032 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2033 pa_sink_input_set_volume_direct(i, &new_volume);
2037 /* Called from main thread. Only called for the root sink in volume sharing
2038 * cases, except for internal recursive calls. The return value indicates
2039 * whether any reference volume actually changed. */
2040 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
     /* Set the sink's reference volume to v (given in channel_map) and
      * propagate the same volume to volume-sharing filter sinks. */
2042 bool reference_volume_changed;
2046 pa_sink_assert_ref(s);
2047 pa_assert(PA_SINK_IS_LINKED(s->state));
2049 pa_assert(channel_map);
2050 pa_assert(pa_cvolume_valid(v));
2053 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
2055 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
2056 pa_sink_set_reference_volume_direct(s, &volume);
     /* Keep a previously requested save, or honour the new request. */
2058 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
2060 if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2061 /* If the root sink's volume doesn't change, then there can't be any
2062 * changes in the other sinks in the sink tree either.
2064 * It's probably theoretically possible that even if the root sink's
2065 * volume changes slightly, some filter sink doesn't change its volume
2066 * due to rounding errors. If that happens, we still want to propagate
2067 * the changed root sink volume to the sinks connected to the
2068 * intermediate sink that didn't change its volume. This theoretical
2069 * possibility is the reason why we have that !(s->flags &
2070 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2071 * notice even if we returned here false always if
2072 * reference_volume_changed is false. */
     /* Recurse into linked volume-sharing filter sinks (save=false:
      * filter sink volumes are derived, not user-set). */
2075 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2076 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2077 && PA_SINK_IS_LINKED(i->origin_sink->state))
2078 update_reference_volume(i->origin_sink, v, channel_map, false);
2084 /* Called from main thread */
2085 void pa_sink_set_volume(
     /* Set the sink's volume (or, with volume == NULL in flat-volume
      * mode, resynchronize the sink volume from the stream volumes).
      * The change is applied to the root of the volume-sharing tree and
      * propagated from there; finally the hardware/soft volume is
      * updated and the IO thread is notified. */
2087 const pa_cvolume *volume,
2091 pa_cvolume new_reference_volume;
2094 pa_sink_assert_ref(s);
2095 pa_assert_ctl_context();
2096 pa_assert(PA_SINK_IS_LINKED(s->state));
2097 pa_assert(!volume || pa_cvolume_valid(volume));
2098 pa_assert(volume || pa_sink_flat_volume_enabled(s));
2099 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2101 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2102 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2103 if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2104 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
2108 /* In case of volume sharing, the volume is set for the root sink first,
2109 * from which it's then propagated to the sharing sinks. */
2110 root_sink = pa_sink_get_master(s);
2112 if (PA_UNLIKELY(!root_sink))
2115 /* As a special exception we accept mono volumes on all sinks --
2116 * even on those with more complex channel maps */
     /* volume != NULL: compute the requested reference volume, scaling a
      * mono volume uniformly across all channels. */
2119 if (pa_cvolume_compatible(volume, &s->sample_spec))
2120 new_reference_volume = *volume;
2122 new_reference_volume = s->reference_volume;
2123 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2126 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2128 if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2129 if (pa_sink_flat_volume_enabled(root_sink)) {
2130 /* OK, propagate this volume change back to the inputs */
2131 propagate_reference_volume(root_sink);
2133 /* And now recalculate the real volume */
2134 compute_real_volume(root_sink);
2136 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2140 /* If volume is NULL we synchronize the sink's real and
2141 * reference volumes with the stream volumes. */
2143 pa_assert(pa_sink_flat_volume_enabled(root_sink));
2145 /* Ok, let's determine the new real volume */
2146 compute_real_volume(root_sink);
2148 /* Let's 'push' the reference volume if necessary */
2149 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2150 /* If the sink and its root don't have the same number of channels, we need to remap */
2151 if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2152 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2153 update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2155 /* Now that the reference volume is updated, we can update the streams'
2156 * reference ratios. */
2157 compute_reference_ratios(root_sink);
2160 if (root_sink->set_volume) {
2161 /* If we have a function set_volume(), then we do not apply a
2162 * soft volume by default. However, set_volume() is free to
2163 * apply one to root_sink->soft_volume */
2165 pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
     /* With deferred volume, set_volume() runs in the IO thread instead. */
2166 if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2167 root_sink->set_volume(root_sink);
2170 /* If we have no function set_volume(), then the soft volume
2171 * becomes the real volume */
2172 root_sink->soft_volume = root_sink->real_volume;
2174 /* This tells the sink that soft volume and/or real volume changed */
2176 pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2179 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2180 * Only to be called by sink implementor */
2181 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
     /* Set the sink's soft (software-applied) volume; a NULL volume
      * resets it to PA_VOLUME_NORM on all channels.  Thread context
      * depends on PA_SINK_DEFERRED_VOLUME, hence the split asserts. */
2183 pa_sink_assert_ref(s);
2184 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2186 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2187 pa_sink_assert_io_context(s);
2189 pa_assert_ctl_context();
2192 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2194 s->soft_volume = *volume;
     /* Linked + non-deferred: hand the new value to the IO thread via a
      * synchronous message; otherwise copy it into thread_info directly. */
2196 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2197 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2199 s->thread_info.soft_volume = s->soft_volume;
2202 /* Called from the main thread. Only called for the root sink in volume sharing
2203 * cases, except for internal recursive calls. */
2204 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
     /* React to a hardware-initiated volume change: adopt the new real
      * volume as the reference volume and rebuild all stream volumes
      * from their (unchanged) real ratios. */
2208 pa_sink_assert_ref(s);
2209 pa_assert(old_real_volume);
2210 pa_assert_ctl_context();
2211 pa_assert(PA_SINK_IS_LINKED(s->state));
2213 /* This is called when the hardware's real volume changes due to
2214 * some external event. We copy the real volume into our
2215 * reference volume and then rebuild the stream volumes based on
2216 * i->real_ratio which should stay fixed. */
2218 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
     /* No actual change -> nothing to propagate (early return elided). */
2219 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2222 /* 1. Make the real volume the reference volume */
2223 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2226 if (pa_sink_flat_volume_enabled(s)) {
2228 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2229 pa_cvolume new_volume;
2231 /* 2. Since the sink's reference and real volumes are equal
2232 * now our ratios should be too. */
2233 pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2235 /* 3. Recalculate the new stream reference volume based on the
2236 * reference ratio and the sink's reference volume.
2238 * This basically calculates:
2240 * i->volume = s->reference_volume * i->reference_ratio
2242 * This is identical to propagate_reference_volume() */
2243 new_volume = s->reference_volume;
2244 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2245 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2246 pa_sink_input_set_volume_direct(i, &new_volume);
     /* Recurse into linked volume-sharing filter sinks. */
2248 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2249 && PA_SINK_IS_LINKED(i->origin_sink->state))
2250 propagate_real_volume(i->origin_sink, old_real_volume);
2254 /* Something got changed in the hardware. It probably makes sense
2255 * to save changed hw settings given that hw volume changes not
2256 * triggered by PA are almost certainly done by the user. */
2257 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2258 s->save_volume = true;
2261 /* Called from io thread */
/* Asks the main thread (via the outbound thread message queue) to re-read
 * hardware volume and mute; handled as
 * PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE in pa_sink_process_msg(). */
2262 void pa_sink_update_volume_and_mute(pa_sink *s) {
2264 pa_sink_assert_io_context(s);
2266 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2269 /* Called from main thread */
/* Returns the sink's reference volume, optionally refreshing the real
 * volume from the hardware first (when force_refresh is set or the sink
 * is flagged refresh_volume). A refresh queries the driver — either
 * synchronously via s->get_volume() (path elided in this excerpt) or via
 * PA_SINK_MESSAGE_GET_VOLUME for deferred-volume sinks — and then
 * propagates any externally-changed real volume to the streams. */
2270 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
2271 pa_sink_assert_ref(s);
2272 pa_assert_ctl_context();
2273 pa_assert(PA_SINK_IS_LINKED(s->state));
2275 if (s->refresh_volume || force_refresh) {
2276 struct pa_cvolume old_real_volume;
2278 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2280 old_real_volume = s->real_volume;
2282 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2285 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2287 update_real_volume(s, &s->real_volume, &s->channel_map);
2288 propagate_real_volume(s, &old_real_volume);
2291 return &s->reference_volume;
2294 /* Called from main thread. In volume sharing cases, only the root sink may
/* Notification hook for sink implementors: the hardware volume changed
 * behind PulseAudio's back. Records the new real volume and propagates
 * it to reference volume and streams. */
2296 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2297 pa_cvolume old_real_volume;
2299 pa_sink_assert_ref(s);
2300 pa_assert_ctl_context();
2301 pa_assert(PA_SINK_IS_LINKED(s->state));
2302 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2304 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2306 old_real_volume = s->real_volume;
2307 update_real_volume(s, new_real_volume, &s->channel_map);
2308 propagate_real_volume(s, &old_real_volume);
2311 /* Called from main thread */
/* Sets the sink's mute state. No-ops (apart from possibly latching
 * save_muted) when the state is unchanged; otherwise pushes the new state
 * to the driver and the IO thread, then fires subscription event and
 * hook. NOTE(review): sampled excerpt — the `s->muted = mute;` assignment
 * and the early-return braces are not visible here. */
2312 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2315 pa_sink_assert_ref(s);
2316 pa_assert_ctl_context();
2318 old_muted = s->muted;
2320 if (mute == old_muted) {
/* Same state: only upgrade the "save this setting" flag, never clear it. */
2321 s->save_muted |= save;
2326 s->save_muted = save;
2328 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
/* Guard so that pa_sink_mute_changed() can ignore the feedback the
 * driver callback may generate while we are the ones changing mute. */
2329 s->set_mute_in_progress = true;
2331 s->set_mute_in_progress = false;
2334 if (!PA_SINK_IS_LINKED(s->state))
2337 pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
2338 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2339 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2340 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2343 /* Called from main thread */
/* Returns the current mute state, optionally re-querying the driver first
 * (deferred-volume sinks are queried in the IO thread via
 * PA_SINK_MESSAGE_GET_MUTE, others directly via s->get_mute()). A changed
 * hardware state is folded in through pa_sink_mute_changed(). The final
 * `return s->muted;` is elided in this excerpt. */
2344 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2346 pa_sink_assert_ref(s);
2347 pa_assert_ctl_context();
2348 pa_assert(PA_SINK_IS_LINKED(s->state));
2350 if ((s->refresh_muted || force_refresh) && s->get_mute) {
2353 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2354 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2355 pa_sink_mute_changed(s, mute);
2357 if (s->get_mute(s, &mute) >= 0)
2358 pa_sink_mute_changed(s, mute);
2365 /* Called from main thread */
/* Notification hook for sink implementors: the hardware mute state
 * changed externally. Ignores feedback generated by our own
 * pa_sink_set_mute() call (set_mute_in_progress), then adopts the new
 * state with save=true since an external change is user-initiated. */
2366 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2367 pa_sink_assert_ref(s);
2368 pa_assert_ctl_context();
2369 pa_assert(PA_SINK_IS_LINKED(s->state));
2371 if (s->set_mute_in_progress)
2374 /* pa_sink_set_mute() does this same check, so this may appear redundant,
2375 * but we must have this here also, because the save parameter of
2376 * pa_sink_set_mute() would otherwise have unintended side effects (saving
2377 * the mute state when it shouldn't be saved). */
2378 if (new_muted == s->muted)
2381 pa_sink_set_mute(s, new_muted, true);
2384 /* Called from main thread */
/* Merges property list p into the sink's proplist according to mode and,
 * if the sink is linked, notifies hooks/subscribers of the change.
 * (The boolean return statement is elided in this excerpt.) */
2385 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2386 pa_sink_assert_ref(s);
2387 pa_assert_ctl_context();
2390 pa_proplist_update(s->proplist, mode, p);
2392 if (PA_SINK_IS_LINKED(s->state)) {
2393 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2394 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2400 /* Called from main thread */
2401 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Updates PA_PROP_DEVICE_DESCRIPTION (NULL description unsets it), keeps
 * the monitor source's description in sync ("Monitor Source of ..."), and
 * notifies hooks/subscribers if the sink is linked. Early-returns when
 * nothing would change. */
2402 void pa_sink_set_description(pa_sink *s, const char *description) {
2404 pa_sink_assert_ref(s);
2405 pa_assert_ctl_context();
2407 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2410 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2412 if (old && description && pa_streq(old, description))
2416 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2418 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2420 if (s->monitor_source) {
/* n is freed after pa_source_set_description(); the xfree() call is
 * elided in this excerpt. */
2423 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2424 pa_source_set_description(s->monitor_source, n);
2428 if (PA_SINK_IS_LINKED(s->state)) {
2429 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2430 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2434 /* Called from main thread */
/* Counts everything linked to this sink: all sink inputs (corked ones
 * included) plus anything connected to the monitor source. Compare
 * pa_sink_used_by(), which excludes both. */
2435 unsigned pa_sink_linked_by(pa_sink *s) {
2438 pa_sink_assert_ref(s);
2439 pa_assert_ctl_context();
2440 pa_assert(PA_SINK_IS_LINKED(s->state));
2442 ret = pa_idxset_size(s->inputs);
2444 /* We add in the number of streams connected to us here. Please
2445 * note the asymmetry to pa_sink_used_by()! */
2447 if (s->monitor_source)
2448 ret += pa_source_linked_by(s->monitor_source);
2453 /* Called from main thread */
/* Counts actively playing (non-corked) sink inputs. Monitor-source
 * streams are deliberately excluded — see pa_sink_linked_by(). */
2454 unsigned pa_sink_used_by(pa_sink *s) {
2457 pa_sink_assert_ref(s);
2458 pa_assert_ctl_context();
2459 pa_assert(PA_SINK_IS_LINKED(s->state));
2461 ret = pa_idxset_size(s->inputs);
2462 pa_assert(ret >= s->n_corked);
2464 /* Streams connected to our monitor source do not matter for
2465 * pa_sink_used_by()!.*/
2467 return ret - s->n_corked;
2470 /* Called from main thread */
/* Returns the number of streams (sink inputs plus monitor-source outputs)
 * that should keep this sink from auto-suspending. ignore_input /
 * ignore_output let a caller exclude a stream that is about to move away.
 * Unlinked, corked, and DONT_INHIBIT_AUTO_SUSPEND inputs don't count.
 * (The `ret++` accumulation and final return are elided in this excerpt.) */
2471 unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
2476 pa_sink_assert_ref(s);
2477 pa_assert_ctl_context();
2479 if (!PA_SINK_IS_LINKED(s->state))
2484 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2485 if (i == ignore_input)
2488 /* We do not assert here. It is perfectly valid for a sink input to
2489 * be in the INIT state (i.e. created, marked done but not yet put)
2490 * and we should not care if it's unlinked as it won't contribute
2491 * towards our busy status.
2493 if (!PA_SINK_INPUT_IS_LINKED(i->state))
2496 if (i->state == PA_SINK_INPUT_CORKED)
2499 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2505 if (s->monitor_source)
2506 ret += pa_source_check_suspend(s->monitor_source, ignore_output);
/* Maps a pa_sink_state_t to its human-readable name for logging.
 * Unknown values hit pa_assert_not_reached(). */
2511 const char *pa_sink_state_to_string(pa_sink_state_t state) {
2513 case PA_SINK_INIT: return "INIT";
2514 case PA_SINK_IDLE: return "IDLE";
2515 case PA_SINK_RUNNING: return "RUNNING";
2516 case PA_SINK_SUSPENDED: return "SUSPENDED";
2517 case PA_SINK_UNLINKED: return "UNLINKED";
2518 case PA_SINK_INVALID_STATE: return "INVALID_STATE";
2521 pa_assert_not_reached();
2524 /* Called from the IO thread */
/* Copies each sink input's main-thread soft volume into its thread_info
 * shadow. A rewind is requested for every input whose volume actually
 * changed so already-rendered audio is re-mixed at the new level. */
2525 static void sync_input_volumes_within_thread(pa_sink *s) {
2529 pa_sink_assert_ref(s);
2530 pa_sink_assert_io_context(s);
2532 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2533 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2536 i->thread_info.soft_volume = i->soft_volume;
2537 pa_sink_input_request_rewind(i, 0, true, false, false);
2541 /* Called from the IO thread. Only called for the root sink in volume sharing
2542 * cases, except for internal recursive calls. */
/* Applies the synced volume on this sink (SET_VOLUME_SYNCED, invoked
 * directly so it runs in-thread) and then recurses into every filter sink
 * that shares volume with it. */
2543 static void set_shared_volume_within_thread(pa_sink *s) {
2544 pa_sink_input *i = NULL;
2547 pa_sink_assert_ref(s);
2549 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2551 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2552 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2553 set_shared_volume_within_thread(i->origin_sink);
2557 /* Called from IO thread, except when it is not */
/* Central message dispatcher for the sink's asyncmsgq. Handles stream
 * attach/detach and moves, volume/mute get/set (including the deferred-
 * volume paths), state changes and the latency/rewind bookkeeping
 * queries. NOTE(review): this excerpt is sampled — several `return 0;`
 * statements, braces and small statements between the visible lines are
 * elided; the visible code is byte-identical to the original. */
2558 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2559 pa_sink *s = PA_SINK(o);
2560 pa_sink_assert_ref(s);
2562 switch ((pa_sink_message_t) code) {
2564 case PA_SINK_MESSAGE_ADD_INPUT: {
2565 pa_sink_input *i = PA_SINK_INPUT(userdata);
2567 /* If you change anything here, make sure to change the
2568 * sink input handling a few lines down at
2569 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2571 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2573 /* Since the caller sleeps in pa_sink_input_put(), we can
2574 * safely access data outside of thread_info even though
2577 if ((i->thread_info.sync_prev = i->sync_prev)) {
2578 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2579 pa_assert(i->sync_prev->sync_next == i);
2580 i->thread_info.sync_prev->thread_info.sync_next = i;
2583 if ((i->thread_info.sync_next = i->sync_next)) {
2584 pa_assert(i->sink == i->thread_info.sync_next->sink);
2585 pa_assert(i->sync_next->sync_prev == i);
2586 i->thread_info.sync_next->thread_info.sync_prev = i;
2589 pa_sink_input_attach(i);
2591 pa_sink_input_set_state_within_thread(i, i->state);
2593 /* The requested latency of the sink input needs to be fixed up and
2594 * then configured on the sink. If this causes the sink latency to
2595 * go down, the sink implementor is responsible for doing a rewind
2596 * in the update_requested_latency() callback to ensure that the
2597 * sink buffer doesn't contain more data than what the new latency
2600 * XXX: Does it really make sense to push this responsibility to
2601 * the sink implementors? Wouldn't it be better to do it once in
2602 * the core than many times in the modules? */
2604 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2605 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2607 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2608 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2610 /* We don't rewind here automatically. This is left to the
2611 * sink input implementor because some sink inputs need a
2612 * slow start, i.e. need some time to buffer client
2613 * samples before beginning streaming.
2615 * XXX: Does it really make sense to push this functionality to
2616 * the sink implementors? Wouldn't it be better to do it once in
2617 * the core than many times in the modules? */
2619 /* In flat volume mode we need to update the volume as
2621 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2624 case PA_SINK_MESSAGE_REMOVE_INPUT: {
2625 pa_sink_input *i = PA_SINK_INPUT(userdata);
2627 /* If you change anything here, make sure to change the
2628 * sink input handling a few lines down at
2629 * PA_SINK_MESSAGE_START_MOVE, too. */
2631 pa_sink_input_detach(i);
2633 pa_sink_input_set_state_within_thread(i, i->state);
2635 /* Since the caller sleeps in pa_sink_input_unlink(),
2636 * we can safely access data outside of thread_info even
2637 * though it is mutable */
2639 pa_assert(!i->sync_prev);
2640 pa_assert(!i->sync_next);
2642 if (i->thread_info.sync_prev) {
2643 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2644 i->thread_info.sync_prev = NULL;
2647 if (i->thread_info.sync_next) {
2648 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2649 i->thread_info.sync_next = NULL;
2652 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2653 pa_sink_invalidate_requested_latency(s, true);
2654 pa_sink_request_rewind(s, (size_t) -1);
2656 /* In flat volume mode we need to update the volume as
2658 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2661 case PA_SINK_MESSAGE_START_MOVE: {
2662 pa_sink_input *i = PA_SINK_INPUT(userdata);
2664 /* We don't support moving synchronized streams. */
2665 pa_assert(!i->sync_prev);
2666 pa_assert(!i->sync_next);
2667 pa_assert(!i->thread_info.sync_next);
2668 pa_assert(!i->thread_info.sync_prev);
2670 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2672 size_t sink_nbytes, total_nbytes;
2674 /* The old sink probably has some audio from this
2675 * stream in its buffer. We want to "take it back" as
2676 * much as possible and play it to the new sink. We
2677 * don't know at this point how much the old sink can
2678 * rewind. We have to pick something, and that
2679 * something is the full latency of the old sink here.
2680 * So we rewind the stream buffer by the sink latency
2681 * amount, which may be more than what we should
2682 * rewind. This can result in a chunk of audio being
2683 * played both to the old sink and the new sink.
2685 * FIXME: Fix this code so that we don't have to make
2686 * guesses about how much the sink will actually be
2687 * able to rewind. If someone comes up with a solution
2688 * for this, something to note is that the part of the
2689 * latency that the old sink couldn't rewind should
2690 * ideally be compensated after the stream has moved
2691 * to the new sink by adding silence. The new sink
2692 * most likely can't start playing the moved stream
2693 * immediately, and that gap should be removed from
2694 * the "compensation silence" (at least at the time of
2695 * writing this, the move finish code will actually
2696 * already take care of dropping the new sink's
2697 * unrewindable latency, so taking into account the
2698 * unrewindable latency of the old sink is the only
2701 * The render_memblockq contents are discarded,
2702 * because when the sink changes, the format of the
2703 * audio stored in the render_memblockq may change
2704 * too, making the stored audio invalid. FIXME:
2705 * However, the read and write indices are moved back
2706 * the same amount, so if they are not the same now,
2707 * they won't be the same after the rewind either. If
2708 * the write index of the render_memblockq is ahead of
2709 * the read index, then the render_memblockq will feed
2710 * the new sink some silence first, which it shouldn't
2711 * do. The write index should be flushed to be the
2712 * same as the read index. */
2714 /* Get the latency of the sink */
2715 usec = pa_sink_get_latency_within_thread(s, false);
2716 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2717 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
2719 if (total_nbytes > 0) {
2720 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
2721 i->thread_info.rewrite_flush = true;
2722 pa_sink_input_process_rewind(i, sink_nbytes);
2726 pa_sink_input_detach(i);
2728 /* Let's remove the sink input ...*/
2729 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2731 pa_sink_invalidate_requested_latency(s, true);
2733 pa_log_debug("Requesting rewind due to started move");
2734 pa_sink_request_rewind(s, (size_t) -1);
2736 /* In flat volume mode we need to update the volume as
2738 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2741 case PA_SINK_MESSAGE_FINISH_MOVE: {
2742 pa_sink_input *i = PA_SINK_INPUT(userdata);
2744 /* We don't support moving synchronized streams. */
2745 pa_assert(!i->sync_prev);
2746 pa_assert(!i->sync_next);
2747 pa_assert(!i->thread_info.sync_next);
2748 pa_assert(!i->thread_info.sync_prev);
2750 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2752 pa_sink_input_attach(i);
2754 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2758 /* In the ideal case the new sink would start playing
2759 * the stream immediately. That requires the sink to
2760 * be able to rewind all of its latency, which usually
2761 * isn't possible, so there will probably be some gap
2762 * before the moved stream becomes audible. We then
2763 * have two possibilities: 1) start playing the stream
2764 * from where it is now, or 2) drop the unrewindable
2765 * latency of the sink from the stream. With option 1
2766 * we won't lose any audio but the stream will have a
2767 * pause. With option 2 we may lose some audio but the
2768 * stream time will be somewhat in sync with the wall
2769 * clock. Lennart seems to have chosen option 2 (one
2770 * of the reasons might have been that option 1 is
2771 * actually much harder to implement), so we drop the
2772 * latency of the new sink from the moved stream and
2773 * hope that the sink will undo most of that in the
2776 /* Get the latency of the sink */
2777 usec = pa_sink_get_latency_within_thread(s, false);
2778 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2781 pa_sink_input_drop(i, nbytes);
2783 pa_log_debug("Requesting rewind due to finished move");
2784 pa_sink_request_rewind(s, nbytes);
2787 /* Updating the requested sink latency has to be done
2788 * after the sink rewind request, not before, because
2789 * otherwise the sink may limit the rewind amount
2792 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2793 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2795 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2796 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2798 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2801 case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
2802 pa_sink *root_sink = pa_sink_get_master(s);
2804 if (PA_LIKELY(root_sink))
2805 set_shared_volume_within_thread(root_sink);
2810 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2812 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
/* Deferred volume: queue a timed HW volume change instead of applying
 * it immediately (the set_volume() call is elided in this excerpt). */
2814 pa_sink_volume_change_push(s);
2816 /* Fall through ... */
2818 case PA_SINK_MESSAGE_SET_VOLUME:
2820 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2821 s->thread_info.soft_volume = s->soft_volume;
2822 pa_sink_request_rewind(s, (size_t) -1);
2825 /* Fall through ... */
2827 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2828 sync_input_volumes_within_thread(s);
2831 case PA_SINK_MESSAGE_GET_VOLUME:
2833 if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
2835 pa_sink_volume_change_flush(s);
2836 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2839 /* In case sink implementor reset SW volume. */
2840 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2841 s->thread_info.soft_volume = s->soft_volume;
2842 pa_sink_request_rewind(s, (size_t) -1);
2847 case PA_SINK_MESSAGE_SET_MUTE:
2849 if (s->thread_info.soft_muted != s->muted) {
2850 s->thread_info.soft_muted = s->muted;
2851 pa_sink_request_rewind(s, (size_t) -1);
2854 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2859 case PA_SINK_MESSAGE_GET_MUTE:
2861 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2862 return s->get_mute(s, userdata);
2866 case PA_SINK_MESSAGE_SET_STATE: {
2867 struct set_state_data *data = userdata;
2868 bool suspend_change =
2869 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(data->state)) ||
2870 (PA_SINK_IS_OPENED(s->thread_info.state) && data->state == PA_SINK_SUSPENDED);
2872 if (s->set_state_in_io_thread) {
2875 if ((r = s->set_state_in_io_thread(s, data->state, data->suspend_cause)) < 0)
2879 s->thread_info.state = data->state;
2881 if (s->thread_info.state == PA_SINK_SUSPENDED) {
/* Entering suspend cancels any pending rewind. */
2882 s->thread_info.rewind_nbytes = 0;
2883 s->thread_info.rewind_requested = false;
2886 if (suspend_change) {
2890 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2891 if (i->suspend_within_thread)
2892 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2898 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2900 pa_usec_t *usec = userdata;
2901 *usec = pa_sink_get_requested_latency_within_thread(s);
2903 /* Yes, that's right, the IO thread will see -1 when no
2904 * explicit requested latency is configured, the main
2905 * thread will see max_latency */
2906 if (*usec == (pa_usec_t) -1)
2907 *usec = s->thread_info.max_latency;
2912 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2913 pa_usec_t *r = userdata;
2915 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2920 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2921 pa_usec_t *r = userdata;
2923 r[0] = s->thread_info.min_latency;
2924 r[1] = s->thread_info.max_latency;
2929 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2931 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2934 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2936 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2939 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2941 *((size_t*) userdata) = s->thread_info.max_rewind;
2944 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2946 *((size_t*) userdata) = s->thread_info.max_request;
2949 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2951 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2954 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2956 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2959 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2960 /* This message is sent from IO-thread and handled in main thread. */
2961 pa_assert_ctl_context();
2963 /* Make sure we're not messing with main thread when no longer linked */
2964 if (!PA_SINK_IS_LINKED(s->state))
2967 pa_sink_get_volume(s, true);
2968 pa_sink_get_mute(s, true);
2971 case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
2972 s->thread_info.port_latency_offset = offset;
2975 case PA_SINK_MESSAGE_GET_LATENCY:
2976 case PA_SINK_MESSAGE_MAX:
2983 /* Called from main thread */
/* Suspends or resumes every sink in the core for the given cause.
 * NOTE(review): sampled excerpt — the `ret` accumulation/return and the
 * loop's closing brace are elided; per upstream the first failing
 * pa_sink_suspend() result is presumably recorded in ret. */
2984 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
2989 pa_core_assert_ref(c);
2990 pa_assert_ctl_context();
2991 pa_assert(cause != 0);
2993 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2996 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
3003 /* Called from IO thread */
/* Detaches every sink input and the monitor source from the IO loop,
 * e.g. around a driver reconfiguration. */
3004 void pa_sink_detach_within_thread(pa_sink *s) {
3008 pa_sink_assert_ref(s);
3009 pa_sink_assert_io_context(s);
3010 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3012 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3013 pa_sink_input_detach(i);
3015 if (s->monitor_source)
3016 pa_source_detach_within_thread(s->monitor_source);
3019 /* Called from IO thread */
/* Counterpart of pa_sink_detach_within_thread(): re-attaches all sink
 * inputs and the monitor source to the IO loop. */
3020 void pa_sink_attach_within_thread(pa_sink *s) {
3024 pa_sink_assert_ref(s);
3025 pa_sink_assert_io_context(s);
3026 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3028 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3029 pa_sink_input_attach(i);
3031 if (s->monitor_source)
3032 pa_source_attach_within_thread(s->monitor_source);
3035 /* Called from IO thread */
/* Requests that the sink rewrite up to nbytes of already-rendered audio.
 * (size_t) -1 means "as much as possible" (max_rewind). Requests are
 * clamped to max_rewind and coalesced: a smaller request while one is
 * already pending is a no-op. The implementor's request_rewind() callback
 * is invoked to trigger the actual rewind. */
3036 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
3037 pa_sink_assert_ref(s);
3038 pa_sink_assert_io_context(s);
3039 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3041 if (nbytes == (size_t) -1)
3042 nbytes = s->thread_info.max_rewind;
3044 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
3046 if (s->thread_info.rewind_requested &&
3047 nbytes <= s->thread_info.rewind_nbytes)
3050 s->thread_info.rewind_nbytes = nbytes;
3051 s->thread_info.rewind_requested = true;
3053 if (s->request_rewind)
3054 s->request_rewind(s);
3057 /* Called from IO thread */
/* Computes the effective requested latency: the minimum of all sink
 * inputs' requested latencies and the monitor source's, clamped to
 * [min_latency, max_latency]. Returns (pa_usec_t) -1 when nobody
 * requested anything. For fixed-latency sinks the fixed latency (clamped)
 * is returned directly. The result is cached once the sink is linked.
 * (The final `return result;` is elided in this excerpt.) */
3058 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
3059 pa_usec_t result = (pa_usec_t) -1;
3062 pa_usec_t monitor_latency;
3064 pa_sink_assert_ref(s);
3065 pa_sink_assert_io_context(s);
3067 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3068 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
3070 if (s->thread_info.requested_latency_valid)
3071 return s->thread_info.requested_latency;
3073 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3074 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3075 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3076 result = i->thread_info.requested_sink_latency;
3078 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3080 if (monitor_latency != (pa_usec_t) -1 &&
3081 (result == (pa_usec_t) -1 || result > monitor_latency))
3082 result = monitor_latency;
3084 if (result != (pa_usec_t) -1)
3085 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3087 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3088 /* Only cache if properly initialized */
3089 s->thread_info.requested_latency = result;
3090 s->thread_info.requested_latency_valid = true;
3096 /* Called from main thread */
/* Main-thread view of the requested latency, obtained by asking the IO
 * thread. Per the GET_REQUESTED_LATENCY handler, an unset latency is
 * reported as max_latency rather than -1. (The suspended-case return and
 * final `return usec;` are elided in this excerpt.) */
3097 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3100 pa_sink_assert_ref(s);
3101 pa_assert_ctl_context();
3102 pa_assert(PA_SINK_IS_LINKED(s->state));
3104 if (s->state == PA_SINK_SUSPENDED)
3107 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3112 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Updates thread_info.max_rewind and fans the new value out to every
 * attached sink input and to the monitor source. No-op if unchanged. */
3113 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3117 pa_sink_assert_ref(s);
3118 pa_sink_assert_io_context(s);
3120 if (max_rewind == s->thread_info.max_rewind)
3123 s->thread_info.max_rewind = max_rewind;
3125 if (PA_SINK_IS_LINKED(s->thread_info.state))
3126 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3127 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3129 if (s->monitor_source)
3130 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3133 /* Called from main thread */
/* Sets max_rewind: routed through the IO thread when linked, applied
 * directly before the IO thread exists. */
3134 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3135 pa_sink_assert_ref(s);
3136 pa_assert_ctl_context();
3138 if (PA_SINK_IS_LINKED(s->state))
3139 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3141 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3144 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Updates thread_info.max_request and propagates it to all attached sink
 * inputs. No-op if unchanged. (Unlike max_rewind, this is not forwarded
 * to the monitor source.) */
3145 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3148 pa_sink_assert_ref(s);
3149 pa_sink_assert_io_context(s);
3151 if (max_request == s->thread_info.max_request)
3154 s->thread_info.max_request = max_request;
3156 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3159 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3160 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3164 /* Called from main thread */
/* Sets max_request: routed through the IO thread when linked, applied
 * directly before the IO thread exists. */
3165 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3166 pa_sink_assert_ref(s);
3167 pa_assert_ctl_context();
3169 if (PA_SINK_IS_LINKED(s->state))
3170 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3172 pa_sink_set_max_request_within_thread(s, max_request);
3175 /* Called from IO thread */
/* Drops the cached requested-latency value (dynamic-latency sinks only)
 * and notifies the implementor and all inputs so they can recompute.
 * NOTE(review): sampled excerpt — the role of the `dynamic` parameter in
 * the elided lines (presumably gating the early return for fixed-latency
 * sinks) cannot be confirmed from the visible code. */
3176 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
3180 pa_sink_assert_ref(s);
3181 pa_sink_assert_io_context(s);
3183 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3184 s->thread_info.requested_latency_valid = false;
3188 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3190 if (s->update_requested_latency)
3191 s->update_requested_latency(s);
3193 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3194 if (i->update_sink_requested_latency)
3195 i->update_sink_requested_latency(i);
3199 /* Called from main thread */
/* Sets the sink's allowed latency range, normalizing 0 ("no limit") and
 * out-of-range values to the ABSOLUTE_{MIN,MAX}_LATENCY bounds. Only
 * sinks flagged PA_SINK_DYNAMIC_LATENCY may set a non-trivial range.
 * Routed through the IO thread when linked. */
3200 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3201 pa_sink_assert_ref(s);
3202 pa_assert_ctl_context();
3204 /* min_latency == 0: no limit
3205 * min_latency anything else: specified limit
3207 * Similar for max_latency */
3209 if (min_latency < ABSOLUTE_MIN_LATENCY)
3210 min_latency = ABSOLUTE_MIN_LATENCY;
3212 if (max_latency <= 0 ||
3213 max_latency > ABSOLUTE_MAX_LATENCY)
3214 max_latency = ABSOLUTE_MAX_LATENCY;
3216 pa_assert(min_latency <= max_latency);
3218 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3219 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3220 max_latency == ABSOLUTE_MAX_LATENCY) ||
3221 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3223 if (PA_SINK_IS_LINKED(s->state)) {
/* r is the two-element pa_usec_t array {min, max}; its declaration and
 * initialization lines are elided in this excerpt. */
3229 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3231 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3234 /* Called from main thread */
/* Reads the latency range: via a synchronous IO-thread round-trip when
 * linked, straight from thread_info otherwise (safe because the IO
 * thread isn't running yet / anymore). */
3235 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3236 pa_sink_assert_ref(s);
3237 pa_assert_ctl_context();
3238 pa_assert(min_latency);
3239 pa_assert(max_latency);
3241 if (PA_SINK_IS_LINKED(s->state)) {
3242 pa_usec_t r[2] = { 0, 0 };
3244 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3246 *min_latency = r[0];
3247 *max_latency = r[1];
3249 *min_latency = s->thread_info.min_latency;
3250 *max_latency = s->thread_info.max_latency;
3254 /* Called from IO thread */
/* IO-thread side of pa_sink_set_latency_range(): stores the (already
 * validated) range, notifies inputs via update_sink_latency_range(),
 * invalidates the cached requested latency, and mirrors the range onto
 * the monitor source. No-op when the range is unchanged. */
3255 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3256 pa_sink_assert_ref(s);
3257 pa_sink_assert_io_context(s);
3259 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3260 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3261 pa_assert(min_latency <= max_latency);
3263 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3264 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3265 max_latency == ABSOLUTE_MAX_LATENCY) ||
3266 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3268 if (s->thread_info.min_latency == min_latency &&
3269 s->thread_info.max_latency == max_latency)
3272 s->thread_info.min_latency = min_latency;
3273 s->thread_info.max_latency = max_latency;
3275 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3279 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3280 if (i->update_sink_latency_range)
3281 i->update_sink_latency_range(i);
3284 pa_sink_invalidate_requested_latency(s, false);
3286 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3289 /* Called from main thread */
/* Sets the fixed latency of a non-dynamic-latency sink, clamped to the
 * absolute bounds. Dynamic-latency sinks must pass 0 (asserted) and
 * return early. Routed through the IO thread when linked; the monitor
 * source's fixed latency is kept in sync. */
3290 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3291 pa_sink_assert_ref(s);
3292 pa_assert_ctl_context();
3294 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3295 pa_assert(latency == 0);
3299 if (latency < ABSOLUTE_MIN_LATENCY)
3300 latency = ABSOLUTE_MIN_LATENCY;
3302 if (latency > ABSOLUTE_MAX_LATENCY)
3303 latency = ABSOLUTE_MAX_LATENCY;
3305 if (PA_SINK_IS_LINKED(s->state))
3306 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3308 s->thread_info.fixed_latency = latency;
3310 pa_source_set_fixed_latency(s->monitor_source, latency);
3313 /* Called from main thread */
/* Reads the fixed latency (0 for dynamic-latency sinks): IO-thread
 * round-trip when linked, direct thread_info read otherwise. (The final
 * `return latency;` is elided in this excerpt.) */
3314 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3317 pa_sink_assert_ref(s);
3318 pa_assert_ctl_context();
3320 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3323 if (PA_SINK_IS_LINKED(s->state))
3324 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3326 latency = s->thread_info.fixed_latency;
3331 /* Called from IO thread */
/* IO-thread side of pa_sink_set_fixed_latency(): stores the value,
 * notifies the attached inputs and mirrors it onto the monitor source. */
3332 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3333 pa_sink_assert_ref(s);
3334 pa_sink_assert_io_context(s);
/* Dynamic-latency sinks have no fixed latency: force it to 0 here and
 * on the monitor source (which may be absent at this point). */
3336 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3337 pa_assert(latency == 0);
3338 s->thread_info.fixed_latency = 0;
3340 if (s->monitor_source)
3341 pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
/* The main-thread caller already clamped, so these must hold. */
3346 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3347 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
/* No-op if unchanged. */
3349 if (s->thread_info.fixed_latency == latency)
3352 s->thread_info.fixed_latency = latency;
3354 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
/* Let every attached input react to the new fixed latency. */
3358 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3359 if (i->update_sink_fixed_latency)
3360 i->update_sink_fixed_latency(i);
3363 pa_sink_invalidate_requested_latency(s, false);
3365 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3368 /* Called from main context */
/* Records the active port's latency offset, forwards it to the IO
 * thread (via message when linked, directly otherwise), and fires the
 * corresponding core hook so listeners can react. */
3369 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3370 pa_sink_assert_ref(s);
3372 s->port_latency_offset = offset;
3374 if (PA_SINK_IS_LINKED(s->state))
3375 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3377 s->thread_info.port_latency_offset = offset;
3379 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3382 /* Called from main context */
/* Returns the sink's maximum rewind size in bytes. Unlinked sinks are
 * read directly from thread_info; linked sinks are queried
 * synchronously from the IO thread via the message queue. */
3383 size_t pa_sink_get_max_rewind(pa_sink *s) {
3385 pa_assert_ctl_context();
3386 pa_sink_assert_ref(s);
3388 if (!PA_SINK_IS_LINKED(s->state))
3389 return s->thread_info.max_rewind;
3391 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3396 /* Called from main context */
/* Returns the sink's maximum request size in bytes, using the same
 * unlinked-direct / linked-via-message pattern as
 * pa_sink_get_max_rewind() above. */
3397 size_t pa_sink_get_max_request(pa_sink *s) {
3399 pa_sink_assert_ref(s);
3400 pa_assert_ctl_context();
3402 if (!PA_SINK_IS_LINKED(s->state))
3403 return s->thread_info.max_request;
3405 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3410 /* Called from main context */
/* Switches the sink's active port by name. Returns 0 on success or a
 * negative PA_ERR_* code. 'save' indicates whether the choice should
 * be remembered persistently. */
3411 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3412 pa_device_port *port;
3414 pa_sink_assert_ref(s);
3415 pa_assert_ctl_context();
/* The sink implementation provides no set_port() callback. */
3418 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3419 return -PA_ERR_NOTIMPLEMENTED;
3423 return -PA_ERR_NOENTITY;
/* Resolve the requested port name against the sink's port table. */
3425 if (!(port = pa_hashmap_get(s->ports, name)))
3426 return -PA_ERR_NOENTITY;
/* Already active: just upgrade the save flag, don't re-apply. */
3428 if (s->active_port == port) {
3429 s->save_port = s->save_port || save;
/* Ask the implementation to perform the actual switch. */
3433 if (s->set_port(s, port) < 0)
3434 return -PA_ERR_NOENTITY;
3436 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3438 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3440 s->active_port = port;
3441 s->save_port = save;
/* Adopt the new port's latency offset. */
3443 pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3445 /* The active port affects the default sink selection. */
3446 pa_core_update_default_sink(s->core);
3448 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
/* Fills in PA_PROP_DEVICE_ICON_NAME from the device's form factor,
 * class, profile name and bus, unless an icon name is already set.
 * NOTE(review): is_sink presumably selects output- vs. input-side
 * fallback icons in branches not shown here -- confirm against the
 * full source. */
3453 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3454 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
/* Respect an icon that was already assigned. */
3458 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
/* First preference: the form factor maps directly to an icon name. */
3461 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3463 if (pa_streq(ff, "microphone"))
3464 t = "audio-input-microphone";
3465 else if (pa_streq(ff, "webcam"))
3467 else if (pa_streq(ff, "computer"))
3469 else if (pa_streq(ff, "handset"))
3471 else if (pa_streq(ff, "portable"))
3472 t = "multimedia-player";
3473 else if (pa_streq(ff, "tv"))
3474 t = "video-display";
3477 * The following icons are not part of the icon naming spec,
3478 * because Rodney Dawes sucks as the maintainer of that spec.
3480 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3482 else if (pa_streq(ff, "headset"))
3483 t = "audio-headset";
3484 else if (pa_streq(ff, "headphone"))
3485 t = "audio-headphones";
3486 else if (pa_streq(ff, "speaker"))
3487 t = "audio-speakers";
3488 else if (pa_streq(ff, "hands-free"))
3489 t = "audio-handsfree";
/* Next: fall back to the device class. */
3493 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3494 if (pa_streq(c, "modem"))
3501 t = "audio-input-microphone";
/* The profile name contributes a suffix (stored in 's' -- the exact
 * assignments are in lines not visible here). */
3504 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3505 if (strstr(profile, "analog"))
3507 else if (strstr(profile, "iec958"))
3509 else if (strstr(profile, "hdmi"))
/* Finally compose "<type><suffix>[-<bus>]" into the icon property. */
3513 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3515 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
/* Fills in PA_PROP_DEVICE_DESCRIPTION if not already present, trying
 * in order: the owning card's description, a form-factor based label,
 * the device class, and the raw product name. If a profile
 * description exists it is appended as a suffix. */
3520 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3521 const char *s, *d = NULL, *k;
/* Respect a description that was already assigned. */
3524 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
/* Prefer the card's human-readable description when available. */
3528 if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3532 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3533 if (pa_streq(s, "internal"))
3534 d = _("Built-in Audio");
3537 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3538 if (pa_streq(s, "modem"))
/* Last resort: the product name reported by the device. */
3542 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
/* Append the profile description, if any, as "<desc> <profile>". */
3547 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3550 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3552 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
/* Fills in PA_PROP_DEVICE_INTENDED_ROLES if not already present:
 * phone-style form factors (handset, hands-free, headset) get the
 * "phone" role. */
3557 bool pa_device_init_intended_roles(pa_proplist *p) {
/* Respect roles that were already assigned. */
3561 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3564 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3565 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3566 || pa_streq(s, "headset")) {
3567 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
/* Computes a heuristic priority for a device from its proplist: the
 * device class, form factor, bus and profile name each add to the
 * score (the actual increments live on lines not visible here).
 * Used to rank devices, e.g. for default-device selection. */
3574 unsigned pa_device_init_priority(pa_proplist *p) {
3576 unsigned priority = 0;
3580 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3582 if (pa_streq(s, "sound"))
3584 else if (!pa_streq(s, "modem"))
3588 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3590 if (pa_streq(s, "headphone"))
3592 else if (pa_streq(s, "hifi"))
3594 else if (pa_streq(s, "speaker"))
3596 else if (pa_streq(s, "portable"))
3600 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3602 if (pa_streq(s, "bluetooth"))
3604 else if (pa_streq(s, "usb"))
3606 else if (pa_streq(s, "pci"))
3610 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3612 if (pa_startswith(s, "analog-")) {
3615 /* If an analog device has an intended role of "phone", it probably
3616 * co-exists with another device that is meant for everything else,
3617 * and that other device should have higher priority than the phone
3619 if (pa_str_in_list_spaces(pa_proplist_gets(p, PA_PROP_DEVICE_INTENDED_ROLES), "phone"))
3622 else if (pa_startswith(s, "iec958-"))
/* Lock-free free list used to recycle pa_sink_volume_change structs
 * (0 selects the default list size; pa_xfree releases entries). */
3629 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3631 /* Called from the IO thread. */
/* Allocates a volume-change entry, reusing one from the static free
 * list when possible, and initializes its list links and hw_volume
 * (reset to the sink's channel count). */
3632 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3633 pa_sink_volume_change *c;
3634 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3635 c = pa_xnew(pa_sink_volume_change, 1);
3637 PA_LLIST_INIT(pa_sink_volume_change, c);
3639 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3643 /* Called from the IO thread. */
/* Returns a volume-change entry to the static free list; when the
 * list is full (push fails) the entry is released outright. */
3644 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3646 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3650 /* Called from the IO thread. */
/* Queues a deferred hardware volume change. The target hw volume is
 * derived as real_volume / soft_volume, and its application time is
 * "now + current sink latency + extra delay", nudged by the safety
 * margin: increases are applied a bit late, decreases a bit early, so
 * that audible glitches at the transition are avoided. Queued changes
 * that would now take effect after the new one are dropped. */
3651 void pa_sink_volume_change_push(pa_sink *s) {
3652 pa_sink_volume_change *c = NULL;
3653 pa_sink_volume_change *nc = NULL;
3654 pa_sink_volume_change *pc = NULL;
3655 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3657 const char *direction = NULL;
3660 nc = pa_sink_volume_change_new(s);
3662 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3663 * Adding one more volume for HW would get us rid of this, but I am trying
3664 * to survive with the ones we already have. */
3665 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
/* Nothing queued and the target equals the current hw volume: bail. */
3667 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3668 pa_log_debug("Volume not changing");
3669 pa_sink_volume_change_free(nc);
/* Schedule at wall-clock time when the samples currently in flight
 * will actually be heard. */
3673 nc->at = pa_sink_get_latency_within_thread(s, false);
3674 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
/* Walk the queue from the tail to find the insertion point relative
 * to already-scheduled changes. */
3676 if (s->thread_info.volume_changes_tail) {
3677 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3678 /* If volume is going up let's do it a bit late. If it is going
3679 * down let's do it a bit early. */
3680 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3681 if (nc->at + safety_margin > c->at) {
3682 nc->at += safety_margin;
3687 else if (nc->at - safety_margin > c->at) {
3688 nc->at -= safety_margin;
/* Queue was empty: apply the margin relative to the current volume. */
3696 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3697 nc->at += safety_margin;
3700 nc->at -= safety_margin;
3703 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3706 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
/* NOTE(review): "%d" is used for pa_cvolume_avg(), which returns an
 * unsigned pa_volume_t -- "%u" would match the type; confirm upstream. */
3709 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3711 /* We can ignore volume events that came earlier but should happen later than this. */
3712 PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
3713 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3714 pa_sink_volume_change_free(c);
/* The new entry is now the last scheduled change. */
3717 s->thread_info.volume_changes_tail = nc;
3720 /* Called from the IO thread. */
/* Discards all queued (not yet applied) hardware volume changes and
 * resets the queue's head and tail pointers. */
3721 static void pa_sink_volume_change_flush(pa_sink *s) {
3722 pa_sink_volume_change *c = s->thread_info.volume_changes;
3724 s->thread_info.volume_changes = NULL;
3725 s->thread_info.volume_changes_tail = NULL;
/* Free every entry, saving the next pointer before releasing. */
3727 pa_sink_volume_change *next = c->next;
3728 pa_sink_volume_change_free(c);
3733 /* Called from the IO thread. */
/* Applies every queued hardware volume change whose scheduled time has
 * arrived, updating current_hw_volume as it goes. If changes remain,
 * the time until the next one is reported through usec_to_next (when
 * non-NULL). Requires the sink to implement write_volume. */
3734 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
/* Nothing queued, or the sink is not linked: nothing to apply. */
3740 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3746 pa_assert(s->write_volume);
3748 now = pa_rtclock_now();
/* Pop and apply every change that is already due. */
3750 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3751 pa_sink_volume_change *c = s->thread_info.volume_changes;
3752 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3753 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3754 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3756 s->thread_info.current_hw_volume = c->hw_volume;
3757 pa_sink_volume_change_free(c);
/* Changes remain: report how long until the next one fires. */
3763 if (s->thread_info.volume_changes) {
3765 *usec_to_next = s->thread_info.volume_changes->at - now;
3766 if (pa_log_ratelimit(PA_LOG_DEBUG))
3767 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
/* Queue drained: clear the stale tail pointer. */
3772 s->thread_info.volume_changes_tail = NULL;
3777 /* Called from the IO thread. */
/* After a rewind of 'nbytes', the audio that the queued volume events
 * were timed against will play sooner, so pull those events earlier
 * accordingly (bounded by the current latency plus extra delay, with
 * the usual up-late/down-early safety-margin adjustment), then apply
 * whatever is already due. */
3778 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3779 /* All the queued volume events later than current latency are shifted to happen earlier. */
3780 pa_sink_volume_change *c;
3781 pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
/* Convert the rewound byte count into time. */
3782 pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3783 pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
3785 pa_log_debug("latency = %lld", (long long) limit);
3786 limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3788 PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3789 pa_usec_t modified_limit = limit;
/* Going down relative to the previous step: allow it a bit early;
 * going up: keep it a bit late (mirrors the push-time policy). */
3790 if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3791 modified_limit -= s->thread_info.volume_change_safety_margin;
3793 modified_limit += s->thread_info.volume_change_safety_margin;
/* Events beyond the limit are shifted earlier, clamped to the limit. */
3794 if (c->at > modified_limit) {
3796 if (c->at < modified_limit)
3797 c->at = modified_limit;
3799 prev_vol = pa_cvolume_avg(&c->hw_volume);
/* Immediately apply anything that is now due. */
3801 pa_sink_volume_change_apply(s, NULL);
3804 /* Called from the main thread */
3805 /* Gets the list of formats supported by the sink. The members and idxset must
3806 * be freed by the caller. */
3807 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3812 if (s->get_formats) {
3813 /* Sink supports format query, all is good */
3814 ret = s->get_formats(s);
3816 /* Sink doesn't support format query, so assume it does PCM */
/* Fabricate a single-entry set containing a plain PCM format. */
3817 pa_format_info *f = pa_format_info_new();
3818 f->encoding = PA_ENCODING_PCM;
3820 ret = pa_idxset_new(NULL, NULL);
3821 pa_idxset_put(ret, f, NULL);
3827 /* Called from the main thread */
3828 /* Allows an external source to set what formats a sink supports if the sink
3829 * permits this. The function makes a copy of the formats on success. */
3830 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3835 /* Sink supports setting formats -- let's give it a shot */
3836 return s->set_formats(s, formats);
3838 /* Sink doesn't support setting this -- bail out */
3842 /* Called from the main thread */
3843 /* Checks if the sink can accept this format */
/* Returns true iff any of the sink's supported formats is compatible
 * with 'f'. The temporary format set is freed before returning. */
3844 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3845 pa_idxset *formats = NULL;
3851 formats = pa_sink_get_formats(s);
3854 pa_format_info *finfo_device;
/* Scan for the first compatible entry. */
3857 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3858 if (pa_format_info_is_compatible(finfo_device, f)) {
3864 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3870 /* Called from the main thread */
3871 /* Calculates the intersection between formats supported by the sink and
3872 * in_formats, and returns these, in the order of the sink's formats. */
3873 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3874 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3875 pa_format_info *f_sink, *f_in;
/* An empty or missing input set yields an empty intersection. */
3880 if (!in_formats || pa_idxset_isempty(in_formats))
3883 sink_formats = pa_sink_get_formats(s);
/* Outer loop over the sink's formats preserves the sink's ordering;
 * compatible input entries are copied into the result. */
3885 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3886 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3887 if (pa_format_info_is_compatible(f_sink, f_in))
3888 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
/* Release the temporary sink-format set. */
3894 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3899 /* Called from the main thread */
/* Updates the sink's sample format, logging the transition and posting
 * a change event to subscribers. No-op when the format is unchanged. */
3900 void pa_sink_set_sample_format(pa_sink *s, pa_sample_format_t format) {
3901 pa_sample_format_t old_format;
3904 pa_assert(pa_sample_format_valid(format));
3906 old_format = s->sample_spec.format;
3907 if (old_format == format)
3910 pa_log_info("%s: format: %s -> %s",
3911 s->name, pa_sample_format_to_string(old_format), pa_sample_format_to_string(format));
3913 s->sample_spec.format = format;
3915 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3918 /* Called from the main thread */
/* Updates the sink's sample rate, logging the transition and posting a
 * change event to subscribers. No-op when the rate is unchanged. */
3919 void pa_sink_set_sample_rate(pa_sink *s, uint32_t rate) {
3923 pa_assert(pa_sample_rate_valid(rate));
3925 old_rate = s->sample_spec.rate;
3926 if (old_rate == rate)
3929 pa_log_info("%s: rate: %u -> %u", s->name, old_rate, rate);
3931 s->sample_spec.rate = rate;
3933 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3936 /* Called from the main thread. */
/* Directly overwrites the sink's reference volume (bypassing the
 * normal set-volume path), logs the old/new values verbosely, and
 * notifies subscribers plus the volume-changed hook. No-op when the
 * volume is already equal. */
3937 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
3938 pa_cvolume old_volume;
3939 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3940 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3945 old_volume = s->reference_volume;
3947 if (pa_cvolume_equal(volume, &old_volume))
3950 s->reference_volume = *volume;
/* Include dB values in the log only when the sink supports them. */
3951 pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
3952 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
3953 s->flags & PA_SINK_DECIBEL_VOLUME),
3954 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
3955 s->flags & PA_SINK_DECIBEL_VOLUME));
3957 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3958 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);
3961 void pa_sink_move_streams_to_default_sink(pa_core *core, pa_sink *old_sink, bool default_sink_changed) {
3966 pa_assert(old_sink);
3968 if (core->state == PA_CORE_SHUTDOWN)
3971 if (core->default_sink == NULL || core->default_sink->unlink_requested)
3974 if (old_sink == core->default_sink)
3977 PA_IDXSET_FOREACH(i, old_sink->inputs, idx) {
3978 if (!PA_SINK_INPUT_IS_LINKED(i->state))
3984 /* Don't move sink-inputs which connect filter sinks to their target sinks */
3988 /* If default_sink_changed is false, the old sink became unavailable, so all streams must be moved. */
3989 if (pa_safe_streq(old_sink->name, i->preferred_sink) && default_sink_changed)
3992 if (!pa_sink_input_may_move_to(i, core->default_sink))
3995 if (default_sink_changed)
3996 pa_log_info("The sink input %u \"%s\" is moving to %s due to change of the default sink.",
3997 i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
3999 pa_log_info("The sink input %u \"%s\" is moving to %s, because the old sink became unavailable.",
4000 i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
4002 pa_sink_input_move_to(i, core->default_sink, false);