2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
30 #include <pulse/format.h>
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
38 #include <pulsecore/core-util.h>
39 #include <pulsecore/source-output.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-subscribe.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/mix.h>
44 #include <pulsecore/flist.h>
48 #define ABSOLUTE_MIN_LATENCY (500)
49 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
50 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
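/* Note (editorial): these constants are expressed in microseconds: ABSOLUTE_MIN_LATENCY is
 * 500 usec, ABSOLUTE_MAX_LATENCY is 10 s and DEFAULT_FIXED_LATENCY works out to 250 ms.
 * The latency ranges requested by outputs are clamped against these bounds elsewhere in
 * this file. */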
52 PA_DEFINE_PUBLIC_CLASS(pa_source, pa_msgobject);
54 struct pa_source_volume_change {
58 PA_LLIST_FIELDS(pa_source_volume_change);
61 struct source_message_set_port {
66 static void source_free(pa_object *o);
68 static void pa_source_volume_change_push(pa_source *s);
69 static void pa_source_volume_change_flush(pa_source *s);
71 pa_source_new_data* pa_source_new_data_init(pa_source_new_data *data) {
75 data->proplist = pa_proplist_new();
76 data->ports = pa_hashmap_new(pa_idxset_string_hash_func, pa_idxset_string_compare_func);
81 void pa_source_new_data_set_name(pa_source_new_data *data, const char *name) {
85 data->name = pa_xstrdup(name);
88 void pa_source_new_data_set_sample_spec(pa_source_new_data *data, const pa_sample_spec *spec) {
91 if ((data->sample_spec_is_set = !!spec))
92 data->sample_spec = *spec;
95 void pa_source_new_data_set_channel_map(pa_source_new_data *data, const pa_channel_map *map) {
98 if ((data->channel_map_is_set = !!map))
99 data->channel_map = *map;
102 void pa_source_new_data_set_alternate_sample_rate(pa_source_new_data *data, const uint32_t alternate_sample_rate) {
105 data->alternate_sample_rate_is_set = TRUE;
106 data->alternate_sample_rate = alternate_sample_rate;
109 void pa_source_new_data_set_volume(pa_source_new_data *data, const pa_cvolume *volume) {
112 if ((data->volume_is_set = !!volume))
113 data->volume = *volume;
116 void pa_source_new_data_set_muted(pa_source_new_data *data, pa_bool_t mute) {
119 data->muted_is_set = TRUE;
120 data->muted = !!mute;
123 void pa_source_new_data_set_port(pa_source_new_data *data, const char *port) {
126 pa_xfree(data->active_port);
127 data->active_port = pa_xstrdup(port);
130 void pa_source_new_data_done(pa_source_new_data *data) {
133 pa_proplist_free(data->proplist);
136 pa_hashmap_free(data->ports, (pa_free_cb_t) pa_device_port_unref);
138 pa_xfree(data->name);
139 pa_xfree(data->active_port);
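/* Illustrative sketch (not part of the original file): roughly how a backend module is
 * expected to use the pa_source_new_data helpers above before calling pa_source_new().
 * The identifiers m, ss and map are placeholders for the module, sample spec and channel
 * map of a hypothetical backend.
 *
 *     pa_source_new_data data;
 *     pa_source *s;
 *
 *     pa_source_new_data_init(&data);
 *     data.driver = __FILE__;
 *     data.module = m;
 *     pa_source_new_data_set_name(&data, "my_capture");
 *     pa_source_new_data_set_sample_spec(&data, &ss);
 *     pa_source_new_data_set_channel_map(&data, &map);
 *
 *     s = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY);
 *     pa_source_new_data_done(&data);
 *
 *     if (!s)
 *         return -1;
 */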
142 /* Called from main context */
143 static void reset_callbacks(pa_source *s) {
147 s->get_volume = NULL;
148 s->set_volume = NULL;
149 s->write_volume = NULL;
152 s->update_requested_latency = NULL;
154 s->get_formats = NULL;
155 s->update_rate = NULL;
158 /* Called from main context */
159 pa_source* pa_source_new(
161 pa_source_new_data *data,
162 pa_source_flags_t flags) {
166 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
171 pa_assert(data->name);
172 pa_assert_ctl_context();
174 s = pa_msgobject_new(pa_source);
176 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SOURCE, s, data->namereg_fail))) {
177 pa_log_debug("Failed to register name %s.", data->name);
182 pa_source_new_data_set_name(data, name);
184 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_NEW], data) < 0) {
186 pa_namereg_unregister(core, name);
190 /* FIXME, need to free s here on failure */
192 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
193 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
195 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
197 if (!data->channel_map_is_set)
198 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
200 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
201 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
203 /* FIXME: There should probably be a general function for checking whether
204 * the source volume is allowed to be set, like there is for source outputs. */
205 pa_assert(!data->volume_is_set || !(flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
207 if (!data->volume_is_set) {
208 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
209 data->save_volume = FALSE;
212 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
213 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
215 if (!data->muted_is_set)
219 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
221 pa_device_init_description(data->proplist);
222 pa_device_init_icon(data->proplist, FALSE);
223 pa_device_init_intended_roles(data->proplist);
225 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_FIXATE], data) < 0) {
227 pa_namereg_unregister(core, name);
231 s->parent.parent.free = source_free;
232 s->parent.process_msg = pa_source_process_msg;
235 s->state = PA_SOURCE_INIT;
238 s->suspend_cause = data->suspend_cause;
239 pa_source_set_mixer_dirty(s, FALSE);
240 s->name = pa_xstrdup(name);
241 s->proplist = pa_proplist_copy(data->proplist);
242 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
243 s->module = data->module;
244 s->card = data->card;
246 s->priority = pa_device_init_priority(s->proplist);
248 s->sample_spec = data->sample_spec;
249 s->channel_map = data->channel_map;
250 s->default_sample_rate = s->sample_spec.rate;
252 if (data->alternate_sample_rate_is_set)
253 s->alternate_sample_rate = data->alternate_sample_rate;
255 s->alternate_sample_rate = s->core->alternate_sample_rate;
257 if (s->sample_spec.rate == s->alternate_sample_rate) {
258 pa_log_warn("Default and alternate sample rates are the same.");
259 s->alternate_sample_rate = 0;
262 s->outputs = pa_idxset_new(NULL, NULL);
264 s->monitor_of = NULL;
265 s->output_from_master = NULL;
267 s->reference_volume = s->real_volume = data->volume;
268 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
269 s->base_volume = PA_VOLUME_NORM;
270 s->n_volume_steps = PA_VOLUME_NORM+1;
271 s->muted = data->muted;
272 s->refresh_volume = s->refresh_muted = FALSE;
279 /* As a minor optimization we just steal the list instead of
281 s->ports = data->ports;
284 s->active_port = NULL;
285 s->save_port = FALSE;
287 if (data->active_port)
288 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
289 s->save_port = data->save_port;
291 if (!s->active_port) {
295 PA_HASHMAP_FOREACH(p, s->ports, state)
296 if (!s->active_port || p->priority > s->active_port->priority)
301 s->latency_offset = s->active_port->latency_offset;
303 s->latency_offset = 0;
305 s->save_volume = data->save_volume;
306 s->save_muted = data->save_muted;
308 pa_silence_memchunk_get(
309 &core->silence_cache,
315 s->thread_info.rtpoll = NULL;
316 s->thread_info.outputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
317 s->thread_info.soft_volume = s->soft_volume;
318 s->thread_info.soft_muted = s->muted;
319 s->thread_info.state = s->state;
320 s->thread_info.max_rewind = 0;
321 s->thread_info.requested_latency_valid = FALSE;
322 s->thread_info.requested_latency = 0;
323 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
324 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
325 s->thread_info.fixed_latency = flags & PA_SOURCE_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
327 PA_LLIST_HEAD_INIT(pa_source_volume_change, s->thread_info.volume_changes);
328 s->thread_info.volume_changes_tail = NULL;
329 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
330 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
331 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
332 s->thread_info.latency_offset = s->latency_offset;
334 /* FIXME: This should probably be moved to pa_source_put() */
335 pa_assert_se(pa_idxset_put(core->sources, s, &s->index) >= 0);
338 pa_assert_se(pa_idxset_put(s->card->sources, s, NULL) >= 0);
340 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
341 pa_log_info("Created source %u \"%s\" with sample spec %s and channel map %s\n %s",
344 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
345 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
352 /* Called from main context */
353 static int source_set_state(pa_source *s, pa_source_state_t state) {
355 pa_bool_t suspend_change;
356 pa_source_state_t original_state;
359 pa_assert_ctl_context();
361 if (s->state == state)
364 original_state = s->state;
367 (original_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(state)) ||
368 (PA_SOURCE_IS_OPENED(original_state) && state == PA_SOURCE_SUSPENDED);
371 if ((ret = s->set_state(s, state)) < 0)
375 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
378 s->set_state(s, original_state);
385 if (state != PA_SOURCE_UNLINKED) { /* if we enter UNLINKED state pa_source_unlink() will fire the appropriate events */
386 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_STATE_CHANGED], s);
387 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
390 if (suspend_change) {
394 /* We're suspending or resuming, tell everyone about it */
396 PA_IDXSET_FOREACH(o, s->outputs, idx)
397 if (s->state == PA_SOURCE_SUSPENDED &&
398 (o->flags & PA_SOURCE_OUTPUT_KILL_ON_SUSPEND))
399 pa_source_output_kill(o);
401 o->suspend(o, state == PA_SOURCE_SUSPENDED);
407 void pa_source_set_get_volume_callback(pa_source *s, pa_source_cb_t cb) {
413 void pa_source_set_set_volume_callback(pa_source *s, pa_source_cb_t cb) {
414 pa_source_flags_t flags;
417 pa_assert(!s->write_volume || cb);
421 /* Save the current flags so we can tell if they've changed */
425 /* The source implementor is responsible for setting decibel volume support */
426 s->flags |= PA_SOURCE_HW_VOLUME_CTRL;
428 s->flags &= ~PA_SOURCE_HW_VOLUME_CTRL;
429 /* See note below in pa_source_put() about volume sharing and decibel volumes */
430 pa_source_enable_decibel_volume(s, !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
433 /* If the flags have changed after init, let any clients know via a change event */
434 if (s->state != PA_SOURCE_INIT && flags != s->flags)
435 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
438 void pa_source_set_write_volume_callback(pa_source *s, pa_source_cb_t cb) {
439 pa_source_flags_t flags;
442 pa_assert(!cb || s->set_volume);
444 s->write_volume = cb;
446 /* Save the current flags so we can tell if they've changed */
450 s->flags |= PA_SOURCE_DEFERRED_VOLUME;
452 s->flags &= ~PA_SOURCE_DEFERRED_VOLUME;
454 /* If the flags have changed after init, let any clients know via a change event */
455 if (s->state != PA_SOURCE_INIT && flags != s->flags)
456 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
459 void pa_source_set_get_mute_callback(pa_source *s, pa_source_cb_t cb) {
465 void pa_source_set_set_mute_callback(pa_source *s, pa_source_cb_t cb) {
466 pa_source_flags_t flags;
472 /* Save the current flags so we can tell if they've changed */
476 s->flags |= PA_SOURCE_HW_MUTE_CTRL;
478 s->flags &= ~PA_SOURCE_HW_MUTE_CTRL;
480 /* If the flags have changed after init, let any clients know via a change event */
481 if (s->state != PA_SOURCE_INIT && flags != s->flags)
482 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
485 static void enable_flat_volume(pa_source *s, pa_bool_t enable) {
486 pa_source_flags_t flags;
490 /* Always follow the overall user preference here */
491 enable = enable && s->core->flat_volumes;
493 /* Save the current flags so we can tell if they've changed */
497 s->flags |= PA_SOURCE_FLAT_VOLUME;
499 s->flags &= ~PA_SOURCE_FLAT_VOLUME;
501 /* If the flags have changed after init, let any clients know via a change event */
502 if (s->state != PA_SOURCE_INIT && flags != s->flags)
503 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
506 void pa_source_enable_decibel_volume(pa_source *s, pa_bool_t enable) {
507 pa_source_flags_t flags;
511 /* Save the current flags so we can tell if they've changed */
515 s->flags |= PA_SOURCE_DECIBEL_VOLUME;
516 enable_flat_volume(s, TRUE);
518 s->flags &= ~PA_SOURCE_DECIBEL_VOLUME;
519 enable_flat_volume(s, FALSE);
522 /* If the flags have changed after init, let any clients know via a change event */
523 if (s->state != PA_SOURCE_INIT && flags != s->flags)
524 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
527 /* Called from main context */
528 void pa_source_put(pa_source *s) {
529 pa_source_assert_ref(s);
530 pa_assert_ctl_context();
532 pa_assert(s->state == PA_SOURCE_INIT);
533 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || s->output_from_master);
535 /* The following fields must be initialized properly when calling _put() */
536 pa_assert(s->asyncmsgq);
537 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
539 /* Generally, flags should be initialized via pa_source_new(). As a
540 * special exception we allow some volume related flags to be set
541 * between _new() and _put() by the callback setter functions above.
543 * Thus we implement a couple safeguards here which ensure the above
544 * setters were used (or at least the implementor made manual changes
545 * in a compatible way).
547 * Note: All of these flags set here can change over the life time
549 pa_assert(!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) || s->set_volume);
550 pa_assert(!(s->flags & PA_SOURCE_DEFERRED_VOLUME) || s->write_volume);
551 pa_assert(!(s->flags & PA_SOURCE_HW_MUTE_CTRL) || s->set_mute);
553 /* XXX: Currently decibel volume is disabled for all sources that use volume
554 * sharing. When the master source supports decibel volume, it would be good
555 * to have the flag also in the filter source, but currently we don't do that
556 * so that the flags of the filter source never change when it's moved from
557 * a master source to another. One solution for this problem would be to
558 * remove user-visible volume altogether from filter sources when volume
559 * sharing is used, but the current approach was easier to implement... */
560 /* We always support decibel volumes in software, otherwise we leave it to
561 * the source implementor to set this flag as needed.
563 * Note: This flag can also change over the life time of the source. */
564 if (!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
565 pa_source_enable_decibel_volume(s, TRUE);
567 /* If the source implementor supports dB volumes by itself, we should always
568 * try to enable flat volumes too */
569 if ((s->flags & PA_SOURCE_DECIBEL_VOLUME))
570 enable_flat_volume(s, TRUE);
572 if (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) {
573 pa_source *root_source = pa_source_get_master(s);
575 pa_assert(PA_LIKELY(root_source));
577 s->reference_volume = root_source->reference_volume;
578 pa_cvolume_remap(&s->reference_volume, &root_source->channel_map, &s->channel_map);
580 s->real_volume = root_source->real_volume;
581 pa_cvolume_remap(&s->real_volume, &root_source->channel_map, &s->channel_map);
583 /* We assume that if the source implementor changed the default
584 * volume he did so in real_volume, because that is the usual
585 * place where he is supposed to place his changes. */
586 s->reference_volume = s->real_volume;
588 s->thread_info.soft_volume = s->soft_volume;
589 s->thread_info.soft_muted = s->muted;
590 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
592 pa_assert((s->flags & PA_SOURCE_HW_VOLUME_CTRL)
593 || (s->base_volume == PA_VOLUME_NORM
594 && ((s->flags & PA_SOURCE_DECIBEL_VOLUME || (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)))));
595 pa_assert(!(s->flags & PA_SOURCE_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
596 pa_assert(!(s->flags & PA_SOURCE_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
598 if (s->suspend_cause)
599 pa_assert_se(source_set_state(s, PA_SOURCE_SUSPENDED) == 0);
601 pa_assert_se(source_set_state(s, PA_SOURCE_IDLE) == 0);
603 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_NEW, s->index);
604 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PUT], s);
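/* Illustrative sketch (not part of the original file): the usual implementor sequence
 * around pa_source_put(), with u standing for a hypothetical module userdata struct that
 * owns the thread mq and rtpoll:
 *
 *     pa_source_set_asyncmsgq(s, u->thread_mq.inq);
 *     pa_source_set_rtpoll(s, u->rtpoll);
 *     pa_source_set_get_volume_callback(s, source_get_volume_cb);
 *     pa_source_set_set_volume_callback(s, source_set_volume_cb);
 *     pa_source_put(s);
 *
 * and on teardown the counterpart is pa_source_unlink() followed by
 * pa_source_unref(). */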
607 /* Called from main context */
608 void pa_source_unlink(pa_source *s) {
610 pa_source_output *o, *j = NULL;
613 pa_assert_ctl_context();
615 /* See pa_sink_unlink() for a couple of comments how this function
618 linked = PA_SOURCE_IS_LINKED(s->state);
621 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], s);
623 if (s->state != PA_SOURCE_UNLINKED)
624 pa_namereg_unregister(s->core, s->name);
625 pa_idxset_remove_by_data(s->core->sources, s, NULL);
628 pa_idxset_remove_by_data(s->card->sources, s, NULL);
630 while ((o = pa_idxset_first(s->outputs, NULL))) {
632 pa_source_output_kill(o);
637 source_set_state(s, PA_SOURCE_UNLINKED);
639 s->state = PA_SOURCE_UNLINKED;
644 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
645 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK_POST], s);
649 /* Called from main context */
650 static void source_free(pa_object *o) {
651 pa_source *s = PA_SOURCE(o);
654 pa_assert_ctl_context();
655 pa_assert(pa_source_refcnt(s) == 0);
657 if (PA_SOURCE_IS_LINKED(s->state))
660 pa_log_info("Freeing source %u \"%s\"", s->index, s->name);
662 pa_idxset_free(s->outputs, NULL);
663 pa_hashmap_free(s->thread_info.outputs, (pa_free_cb_t) pa_source_output_unref);
665 if (s->silence.memblock)
666 pa_memblock_unref(s->silence.memblock);
672 pa_proplist_free(s->proplist);
675 pa_hashmap_free(s->ports, (pa_free_cb_t) pa_device_port_unref);
680 /* Called from main context, and not while the IO thread is active, please */
681 void pa_source_set_asyncmsgq(pa_source *s, pa_asyncmsgq *q) {
682 pa_source_assert_ref(s);
683 pa_assert_ctl_context();
688 /* Called from main context, and not while the IO thread is active, please */
689 void pa_source_update_flags(pa_source *s, pa_source_flags_t mask, pa_source_flags_t value) {
690 pa_source_flags_t old_flags;
691 pa_source_output *output;
694 pa_source_assert_ref(s);
695 pa_assert_ctl_context();
697 /* For now, allow only a minimal set of flags to be changed. */
698 pa_assert((mask & ~(PA_SOURCE_DYNAMIC_LATENCY|PA_SOURCE_LATENCY)) == 0);
700 old_flags = s->flags;
701 s->flags = (s->flags & ~mask) | (value & mask);
703 if (s->flags == old_flags)
706 if ((s->flags & PA_SOURCE_LATENCY) != (old_flags & PA_SOURCE_LATENCY))
707 pa_log_debug("Source %s: LATENCY flag %s.", s->name, (s->flags & PA_SOURCE_LATENCY) ? "enabled" : "disabled");
709 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY) != (old_flags & PA_SOURCE_DYNAMIC_LATENCY))
710 pa_log_debug("Source %s: DYNAMIC_LATENCY flag %s.",
711 s->name, (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ? "enabled" : "disabled");
713 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
714 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_FLAGS_CHANGED], s);
716 PA_IDXSET_FOREACH(output, s->outputs, idx) {
717 if (output->destination_source)
718 pa_source_update_flags(output->destination_source, mask, value);
722 /* Called from IO context, or before _put() from main context */
723 void pa_source_set_rtpoll(pa_source *s, pa_rtpoll *p) {
724 pa_source_assert_ref(s);
725 pa_source_assert_io_context(s);
727 s->thread_info.rtpoll = p;
730 /* Called from main context */
731 int pa_source_update_status(pa_source*s) {
732 pa_source_assert_ref(s);
733 pa_assert_ctl_context();
734 pa_assert(PA_SOURCE_IS_LINKED(s->state));
736 if (s->state == PA_SOURCE_SUSPENDED)
739 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
742 /* Called from any context - must be threadsafe */
743 void pa_source_set_mixer_dirty(pa_source *s, pa_bool_t is_dirty) {
744 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
747 /* Called from main context */
748 int pa_source_suspend(pa_source *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
749 pa_source_assert_ref(s);
750 pa_assert_ctl_context();
751 pa_assert(PA_SOURCE_IS_LINKED(s->state));
752 pa_assert(cause != 0);
754 if (s->monitor_of && cause != PA_SUSPEND_PASSTHROUGH)
755 return -PA_ERR_NOTSUPPORTED;
758 s->suspend_cause |= cause;
760 s->suspend_cause &= ~cause;
762 if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
763 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
764 it'll be handled just fine. */
765 pa_source_set_mixer_dirty(s, FALSE);
766 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
767 if (s->active_port && s->set_port) {
768 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
769 struct source_message_set_port msg = { .port = s->active_port, .ret = 0 };
770 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
773 s->set_port(s, s->active_port);
783 if ((pa_source_get_state(s) == PA_SOURCE_SUSPENDED) == !!s->suspend_cause)
786 pa_log_debug("Suspend cause of source %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
788 if (s->suspend_cause)
789 return source_set_state(s, PA_SOURCE_SUSPENDED);
791 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
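/* Illustrative note: 'cause' is a bitmask and pa_source_suspend() accumulates causes in
 * s->suspend_cause, so the source only resumes once every cause has been cleared again.
 * For example (sketch):
 *
 *     pa_source_suspend(s, TRUE,  PA_SUSPEND_USER);   // suspend_cause = USER
 *     pa_source_suspend(s, TRUE,  PA_SUSPEND_IDLE);   // suspend_cause = USER|IDLE
 *     pa_source_suspend(s, FALSE, PA_SUSPEND_USER);   // still suspended, IDLE remains
 *     pa_source_suspend(s, FALSE, PA_SUSPEND_IDLE);   // resumes (RUNNING or IDLE)
 */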
794 /* Called from main context */
795 int pa_source_sync_suspend(pa_source *s) {
796 pa_sink_state_t state;
798 pa_source_assert_ref(s);
799 pa_assert_ctl_context();
800 pa_assert(PA_SOURCE_IS_LINKED(s->state));
801 pa_assert(s->monitor_of);
803 state = pa_sink_get_state(s->monitor_of);
805 if (state == PA_SINK_SUSPENDED)
806 return source_set_state(s, PA_SOURCE_SUSPENDED);
808 pa_assert(PA_SINK_IS_OPENED(state));
810 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
813 /* Called from main context */
814 pa_queue *pa_source_move_all_start(pa_source *s, pa_queue *q) {
815 pa_source_output *o, *n;
818 pa_source_assert_ref(s);
819 pa_assert_ctl_context();
820 pa_assert(PA_SOURCE_IS_LINKED(s->state));
825 for (o = PA_SOURCE_OUTPUT(pa_idxset_first(s->outputs, &idx)); o; o = n) {
826 n = PA_SOURCE_OUTPUT(pa_idxset_next(s->outputs, &idx));
828 pa_source_output_ref(o);
830 if (pa_source_output_start_move(o) >= 0)
833 pa_source_output_unref(o);
839 /* Called from main context */
840 void pa_source_move_all_finish(pa_source *s, pa_queue *q, pa_bool_t save) {
843 pa_source_assert_ref(s);
844 pa_assert_ctl_context();
845 pa_assert(PA_SOURCE_IS_LINKED(s->state));
848 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
849 if (pa_source_output_finish_move(o, s, save) < 0)
850 pa_source_output_fail_move(o);
852 pa_source_output_unref(o);
855 pa_queue_free(q, NULL);
858 /* Called from main context */
859 void pa_source_move_all_fail(pa_queue *q) {
862 pa_assert_ctl_context();
865 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
866 pa_source_output_fail_move(o);
867 pa_source_output_unref(o);
870 pa_queue_free(q, NULL);
873 /* Called from IO thread context */
874 void pa_source_process_rewind(pa_source *s, size_t nbytes) {
878 pa_source_assert_ref(s);
879 pa_source_assert_io_context(s);
880 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
885 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
888 pa_log_debug("Processing rewind...");
890 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
891 pa_source_output_assert_ref(o);
892 pa_source_output_process_rewind(o, nbytes);
896 /* Called from IO thread context */
897 void pa_source_post(pa_source*s, const pa_memchunk *chunk) {
901 pa_source_assert_ref(s);
902 pa_source_assert_io_context(s);
903 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
906 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
909 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
910 pa_memchunk vchunk = *chunk;
912 pa_memblock_ref(vchunk.memblock);
913 pa_memchunk_make_writable(&vchunk, 0);
915 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
916 pa_silence_memchunk(&vchunk, &s->sample_spec);
918 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
920 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
921 pa_source_output_assert_ref(o);
923 if (!o->thread_info.direct_on_input)
924 pa_source_output_push(o, &vchunk);
927 pa_memblock_unref(vchunk.memblock);
930 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
931 pa_source_output_assert_ref(o);
933 if (!o->thread_info.direct_on_input)
934 pa_source_output_push(o, chunk);
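/* Illustrative sketch (not part of the original file): a backend's IO thread typically
 * fills a memchunk with captured samples and hands it to pa_source_post(). Here u,
 * read_from_hw() and CHUNK_BYTES are placeholders:
 *
 *     pa_memchunk chunk;
 *
 *     chunk.memblock = pa_memblock_new(u->core->mempool, CHUNK_BYTES);
 *     chunk.index = 0;
 *     chunk.length = read_from_hw(u, pa_memblock_acquire(chunk.memblock), CHUNK_BYTES);
 *     pa_memblock_release(chunk.memblock);
 *
 *     pa_source_post(u->source, &chunk);
 *     pa_memblock_unref(chunk.memblock);
 */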
939 /* Called from IO thread context */
940 void pa_source_post_direct(pa_source*s, pa_source_output *o, const pa_memchunk *chunk) {
941 pa_source_assert_ref(s);
942 pa_source_assert_io_context(s);
943 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
944 pa_source_output_assert_ref(o);
945 pa_assert(o->thread_info.direct_on_input);
948 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
951 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
952 pa_memchunk vchunk = *chunk;
954 pa_memblock_ref(vchunk.memblock);
955 pa_memchunk_make_writable(&vchunk, 0);
957 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
958 pa_silence_memchunk(&vchunk, &s->sample_spec);
960 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
962 pa_source_output_push(o, &vchunk);
964 pa_memblock_unref(vchunk.memblock);
966 pa_source_output_push(o, chunk);
969 /* Called from main thread */
970 pa_bool_t pa_source_update_rate(pa_source *s, uint32_t rate, pa_bool_t passthrough) {
971 pa_bool_t ret = FALSE;
973 if (s->update_rate) {
974 uint32_t desired_rate = rate;
975 uint32_t default_rate = s->default_sample_rate;
976 uint32_t alternate_rate = s->alternate_sample_rate;
979 pa_bool_t use_alternate = FALSE;
981 if (PA_UNLIKELY(default_rate == alternate_rate)) {
982 pa_log_warn("Default and alternate sample rates are the same.");
986 if (PA_SOURCE_IS_RUNNING(s->state)) {
987 pa_log_info("Cannot update rate, SOURCE_IS_RUNNING, will keep using %u Hz",
988 s->sample_spec.rate);
992 if (PA_UNLIKELY (desired_rate < 8000 ||
993 desired_rate > PA_RATE_MAX))
997 pa_assert(default_rate % 4000 || default_rate % 11025);
998 pa_assert(alternate_rate % 4000 || alternate_rate % 11025);
1000 if (default_rate % 4000) {
1001 /* default is a multiple of 11025 */
1002 if ((alternate_rate % 4000 == 0) && (desired_rate % 4000 == 0))
1005 /* default is a multiple of 4000 */
1006 if ((alternate_rate % 11025 == 0) && (desired_rate % 11025 == 0))
1011 desired_rate = alternate_rate;
1013 desired_rate = default_rate;
1015 desired_rate = rate; /* use stream sampling rate, discard default/alternate settings */
1018 if (desired_rate == s->sample_spec.rate)
1021 if (!passthrough && pa_source_used_by(s) > 0)
1024 pa_log_debug("Suspending source %s due to changing the sample rate.", s->name);
1025 pa_source_suspend(s, TRUE, PA_SUSPEND_INTERNAL);
1027 if (s->update_rate(s, desired_rate) == TRUE) {
1028 pa_log_info("Changed sampling rate successfully");
1030 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1031 if (o->state == PA_SOURCE_OUTPUT_CORKED)
1032 pa_source_output_update_rate(o);
1037 pa_source_suspend(s, FALSE, PA_SUSPEND_INTERNAL);
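/* Worked example of the rate selection above: with default_rate = 44100 and
 * alternate_rate = 48000, a non-passthrough stream asking for 96000 Hz (a multiple of
 * 4000) switches to the alternate rate 48000, while one asking for 88200 Hz stays in the
 * 44100 family and the default rate is used. In passthrough mode the stream's own rate is
 * used unchanged. */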
1043 /* Called from main thread */
1044 pa_usec_t pa_source_get_latency(pa_source *s) {
1047 pa_source_assert_ref(s);
1048 pa_assert_ctl_context();
1049 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1051 if (s->state == PA_SOURCE_SUSPENDED)
1054 if (!(s->flags & PA_SOURCE_LATENCY))
1057 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1059 /* usec is unsigned, so check that the offset can be added to usec without
1061 if (-s->latency_offset <= (int64_t) usec)
1062 usec += s->latency_offset;
1069 /* Called from IO thread */
1070 pa_usec_t pa_source_get_latency_within_thread(pa_source *s) {
1074 pa_source_assert_ref(s);
1075 pa_source_assert_io_context(s);
1076 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
1078 /* The returned value is supposed to be in the time domain of the sound card! */
1080 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
1083 if (!(s->flags & PA_SOURCE_LATENCY))
1086 o = PA_MSGOBJECT(s);
1088 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1090 if (o->process_msg(o, PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1093 /* usec is unsigned, so check that the offset can be added to usec without
1095 if (-s->thread_info.latency_offset <= (int64_t) usec)
1096 usec += s->thread_info.latency_offset;
1103 /* Called from the main thread (and also from the IO thread while the main
1104 * thread is waiting).
1106 * When a source uses volume sharing, it never has the PA_SOURCE_FLAT_VOLUME flag
1107 * set. Instead, flat volume mode is detected by checking whether the root source
1108 * has the flag set. */
1109 pa_bool_t pa_source_flat_volume_enabled(pa_source *s) {
1110 pa_source_assert_ref(s);
1112 s = pa_source_get_master(s);
1115 return (s->flags & PA_SOURCE_FLAT_VOLUME);
1120 /* Called from the main thread (and also from the IO thread while the main
1121 * thread is waiting). */
1122 pa_source *pa_source_get_master(pa_source *s) {
1123 pa_source_assert_ref(s);
1125 while (s && (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1126 if (PA_UNLIKELY(!s->output_from_master))
1129 s = s->output_from_master->source;
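/* Illustrative note: for a chain of filter sources that share volume with their master
 * (e.g. an echo-cancel source sitting on top of an ALSA source), the loop above follows
 * the output_from_master->source links until it reaches the root source, which is the one
 * that actually owns the flat/real volume state. */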
1135 /* Called from main context */
1136 pa_bool_t pa_source_is_passthrough(pa_source *s) {
1138 pa_source_assert_ref(s);
1140 /* NB Currently only monitor sources support passthrough mode */
1141 return (s->monitor_of && pa_sink_is_passthrough(s->monitor_of));
1144 /* Called from main context */
1145 void pa_source_enter_passthrough(pa_source *s) {
1148 /* set the volume to NORM */
1149 s->saved_volume = *pa_source_get_volume(s, TRUE);
1150 s->saved_save_volume = s->save_volume;
1152 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1153 pa_source_set_volume(s, &volume, TRUE, FALSE);
1156 /* Called from main context */
1157 void pa_source_leave_passthrough(pa_source *s) {
1158 /* Restore source volume to what it was before we entered passthrough mode */
1159 pa_source_set_volume(s, &s->saved_volume, TRUE, s->saved_save_volume);
1161 pa_cvolume_init(&s->saved_volume);
1162 s->saved_save_volume = FALSE;
1165 /* Called from main context. */
1166 static void compute_reference_ratio(pa_source_output *o) {
1168 pa_cvolume remapped;
1171 pa_assert(pa_source_flat_volume_enabled(o->source));
1174 * Calculates the reference ratio from the source's reference
1175 * volume. This basically calculates:
1177 * o->reference_ratio = o->volume / o->source->reference_volume
1180 remapped = o->source->reference_volume;
1181 pa_cvolume_remap(&remapped, &o->source->channel_map, &o->channel_map);
1183 o->reference_ratio.channels = o->sample_spec.channels;
1185 for (c = 0; c < o->sample_spec.channels; c++) {
1187 /* We don't update when the source volume is 0 anyway */
1188 if (remapped.values[c] <= PA_VOLUME_MUTED)
1191 /* Don't update the reference ratio unless necessary */
1192 if (pa_sw_volume_multiply(
1193 o->reference_ratio.values[c],
1194 remapped.values[c]) == o->volume.values[c])
1197 o->reference_ratio.values[c] = pa_sw_volume_divide(
1198 o->volume.values[c],
1199 remapped.values[c]);
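/* Worked example of the ratio above, in dB terms: if o->volume is -10 dB on a channel and
 * the source's reference volume is -4 dB there, the stored reference ratio is roughly
 * -6 dB, so that multiplying the reference volume by the reference ratio reproduces the
 * output's volume again when volumes are propagated later. */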
1203 /* Called from main context. Only called for the root source in volume sharing
1204 * cases, except for internal recursive calls. */
1205 static void compute_reference_ratios(pa_source *s) {
1207 pa_source_output *o;
1209 pa_source_assert_ref(s);
1210 pa_assert_ctl_context();
1211 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1212 pa_assert(pa_source_flat_volume_enabled(s));
1214 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1215 compute_reference_ratio(o);
1217 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1218 compute_reference_ratios(o->destination_source);
1222 /* Called from main context. Only called for the root source in volume sharing
1223 * cases, except for internal recursive calls. */
1224 static void compute_real_ratios(pa_source *s) {
1225 pa_source_output *o;
1228 pa_source_assert_ref(s);
1229 pa_assert_ctl_context();
1230 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1231 pa_assert(pa_source_flat_volume_enabled(s));
1233 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1235 pa_cvolume remapped;
1237 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1238 /* The origin source uses volume sharing, so this output's real ratio
1239 * is handled as a special case - the real ratio must be 0 dB, and
1240 * as a result o->soft_volume must equal o->volume_factor. */
1241 pa_cvolume_reset(&o->real_ratio, o->real_ratio.channels);
1242 o->soft_volume = o->volume_factor;
1244 compute_real_ratios(o->destination_source);
1250 * This basically calculates:
1252 * o->real_ratio := o->volume / s->real_volume
1253 * o->soft_volume := o->real_ratio * o->volume_factor
1256 remapped = s->real_volume;
1257 pa_cvolume_remap(&remapped, &s->channel_map, &o->channel_map);
1259 o->real_ratio.channels = o->sample_spec.channels;
1260 o->soft_volume.channels = o->sample_spec.channels;
1262 for (c = 0; c < o->sample_spec.channels; c++) {
1264 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1265 /* We leave o->real_ratio untouched */
1266 o->soft_volume.values[c] = PA_VOLUME_MUTED;
1270 /* Don't lose accuracy unless necessary */
1271 if (pa_sw_volume_multiply(
1272 o->real_ratio.values[c],
1273 remapped.values[c]) != o->volume.values[c])
1275 o->real_ratio.values[c] = pa_sw_volume_divide(
1276 o->volume.values[c],
1277 remapped.values[c]);
1279 o->soft_volume.values[c] = pa_sw_volume_multiply(
1280 o->real_ratio.values[c],
1281 o->volume_factor.values[c]);
1284 /* We don't copy the soft_volume to the thread_info data
1285 * here. That must be done by the caller */
1289 static pa_cvolume *cvolume_remap_minimal_impact(
1291 const pa_cvolume *template,
1292 const pa_channel_map *from,
1293 const pa_channel_map *to) {
1298 pa_assert(template);
1301 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1302 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1304 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1305 * mapping from source output to source volumes:
1307 * If template is a possible remapping from v it is used instead
1308 * of remapping anew.
1310 * If the channel maps don't match we set an all-channel volume on
1311 * the source to ensure that changing a volume on one stream has no
1312 * effect that cannot be compensated for in another stream that
1313 * does not have the same channel map as the source. */
1315 if (pa_channel_map_equal(from, to))
1319 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1324 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1328 /* Called from main thread. Only called for the root source in volume sharing
1329 * cases, except for internal recursive calls. */
1330 static void get_maximum_output_volume(pa_source *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1331 pa_source_output *o;
1334 pa_source_assert_ref(s);
1335 pa_assert(max_volume);
1336 pa_assert(channel_map);
1337 pa_assert(pa_source_flat_volume_enabled(s));
1339 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1340 pa_cvolume remapped;
1342 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1343 get_maximum_output_volume(o->destination_source, max_volume, channel_map);
1345 /* Ignore this output. The origin source uses volume sharing, so this
1346 * output's volume will be set to be equal to the root source's real
1347 * volume. Obviously this output's current volume must not then
1348 * affect what the root source's real volume will be. */
1352 remapped = o->volume;
1353 cvolume_remap_minimal_impact(&remapped, max_volume, &o->channel_map, channel_map);
1354 pa_cvolume_merge(max_volume, max_volume, &remapped);
1358 /* Called from main thread. Only called for the root source in volume sharing
1359 * cases, except for internal recursive calls. */
1360 static pa_bool_t has_outputs(pa_source *s) {
1361 pa_source_output *o;
1364 pa_source_assert_ref(s);
1366 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1367 if (!o->destination_source || !(o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || has_outputs(o->destination_source))
1374 /* Called from main thread. Only called for the root source in volume sharing
1375 * cases, except for internal recursive calls. */
1376 static void update_real_volume(pa_source *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1377 pa_source_output *o;
1380 pa_source_assert_ref(s);
1381 pa_assert(new_volume);
1382 pa_assert(channel_map);
1384 s->real_volume = *new_volume;
1385 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1387 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1388 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1389 if (pa_source_flat_volume_enabled(s)) {
1390 pa_cvolume old_volume = o->volume;
1392 /* Follow the root source's real volume. */
1393 o->volume = *new_volume;
1394 pa_cvolume_remap(&o->volume, channel_map, &o->channel_map);
1395 compute_reference_ratio(o);
1397 /* The volume changed, let's tell people so */
1398 if (!pa_cvolume_equal(&old_volume, &o->volume)) {
1399 if (o->volume_changed)
1400 o->volume_changed(o);
1402 pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
1406 update_real_volume(o->destination_source, new_volume, channel_map);
1411 /* Called from main thread. Only called for the root source in shared volume
1413 static void compute_real_volume(pa_source *s) {
1414 pa_source_assert_ref(s);
1415 pa_assert_ctl_context();
1416 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1417 pa_assert(pa_source_flat_volume_enabled(s));
1418 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1420 /* This determines the maximum volume of all streams and sets
1421 * s->real_volume accordingly. */
1423 if (!has_outputs(s)) {
1424 /* In the special case that we have no source outputs we leave the
1425 * volume unmodified. */
1426 update_real_volume(s, &s->reference_volume, &s->channel_map);
1430 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1432 /* First let's determine the new maximum volume of all outputs
1433 * connected to this source */
1434 get_maximum_output_volume(s, &s->real_volume, &s->channel_map);
1435 update_real_volume(s, &s->real_volume, &s->channel_map);
1437 /* Then, let's update the real ratios/soft volumes of all outputs
1438 * connected to this source */
1439 compute_real_ratios(s);
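/* Illustrative example of the flat volume computation above: with two outputs at -2 dB
 * and -10 dB, the source's real (device) volume follows the loudest output at roughly
 * -2 dB, the -2 dB output ends up with a real ratio of about 0 dB (no extra software
 * attenuation), and the -10 dB output gets a real ratio of about -8 dB applied as its
 * soft volume (assuming a neutral volume_factor). */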
1442 /* Called from main thread. Only called for the root source in shared volume
1443 * cases, except for internal recursive calls. */
1444 static void propagate_reference_volume(pa_source *s) {
1445 pa_source_output *o;
1448 pa_source_assert_ref(s);
1449 pa_assert_ctl_context();
1450 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1451 pa_assert(pa_source_flat_volume_enabled(s));
1453 /* This is called whenever the source volume changes in a way that is
1454 * not caused by a source output volume change. We need to fix up the
1455 * source output volumes accordingly */
1457 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1458 pa_cvolume old_volume;
1460 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1461 propagate_reference_volume(o->destination_source);
1463 /* Since the origin source uses volume sharing, this output's volume
1464 * needs to be updated to match the root source's real volume, but
1465 * that will be done later in update_shared_real_volume(). */
1469 old_volume = o->volume;
1471 /* This basically calculates:
1473 * o->volume := s->reference_volume * o->reference_ratio */
1475 o->volume = s->reference_volume;
1476 pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
1477 pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);
1479 /* The volume changed, let's tell people so */
1480 if (!pa_cvolume_equal(&old_volume, &o->volume)) {
1482 if (o->volume_changed)
1483 o->volume_changed(o);
1485 pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
1490 /* Called from main thread. Only called for the root source in volume sharing
1491 * cases, except for internal recursive calls. The return value indicates
1492 * whether any reference volume actually changed. */
1493 static pa_bool_t update_reference_volume(pa_source *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
1495 pa_bool_t reference_volume_changed;
1496 pa_source_output *o;
1499 pa_source_assert_ref(s);
1500 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1502 pa_assert(channel_map);
1503 pa_assert(pa_cvolume_valid(v));
1506 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
1508 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
1509 s->reference_volume = volume;
1511 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
1513 if (reference_volume_changed)
1514 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1515 else if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1516 /* If the root source's volume doesn't change, then there can't be any
1517 * changes in the other source in the source tree either.
1519 * It's probably theoretically possible that even if the root source's
1520 * volume changes slightly, some filter source doesn't change its volume
1521 * due to rounding errors. If that happens, we still want to propagate
1522 * the changed root source volume to the sources connected to the
1523 * intermediate source that didn't change its volume. This theoretical
1524 * possibility is the reason why we have that !(s->flags &
1525 * PA_SOURCE_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
1526 * notice even if we always returned FALSE here when
1527 * reference_volume_changed is FALSE. */
1530 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1531 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1532 update_reference_volume(o->destination_source, v, channel_map, FALSE);
1538 /* Called from main thread */
1539 void pa_source_set_volume(
1541 const pa_cvolume *volume,
1545 pa_cvolume new_reference_volume;
1546 pa_source *root_source;
1548 pa_source_assert_ref(s);
1549 pa_assert_ctl_context();
1550 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1551 pa_assert(!volume || pa_cvolume_valid(volume));
1552 pa_assert(volume || pa_source_flat_volume_enabled(s));
1553 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1555 /* make sure we don't change the volume in PASSTHROUGH mode ...
1556 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
1557 if (pa_source_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
1558 pa_log_warn("Cannot change volume, source is monitor of a PASSTHROUGH sink");
1562 /* In case of volume sharing, the volume is set for the root source first,
1563 * from which it's then propagated to the sharing sources. */
1564 root_source = pa_source_get_master(s);
1566 if (PA_UNLIKELY(!root_source))
1569 /* As a special exception we accept mono volumes on all sources --
1570 * even on those with more complex channel maps */
1573 if (pa_cvolume_compatible(volume, &s->sample_spec))
1574 new_reference_volume = *volume;
1576 new_reference_volume = s->reference_volume;
1577 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
1580 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
1582 if (update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save)) {
1583 if (pa_source_flat_volume_enabled(root_source)) {
1584 /* OK, propagate this volume change back to the outputs */
1585 propagate_reference_volume(root_source);
1587 /* And now recalculate the real volume */
1588 compute_real_volume(root_source);
1590 update_real_volume(root_source, &root_source->reference_volume, &root_source->channel_map);
1594 /* If volume is NULL we synchronize the source's real and
1595 * reference volumes with the stream volumes. */
1597 pa_assert(pa_source_flat_volume_enabled(root_source));
1599 /* Ok, let's determine the new real volume */
1600 compute_real_volume(root_source);
1602 /* Let's 'push' the reference volume if necessary */
1603 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_source->real_volume);
1604 /* If the source and its root don't have the same number of channels, we need to remap
1605 if (s != root_source && !pa_channel_map_equal(&s->channel_map, &root_source->channel_map))
1606 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
1607 update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save);
1609 /* Now that the reference volume is updated, we can update the streams'
1610 * reference ratios. */
1611 compute_reference_ratios(root_source);
1614 if (root_source->set_volume) {
1615 /* If we have a function set_volume(), then we do not apply a
1616 * soft volume by default. However, set_volume() is free to
1617 * apply one to root_source->soft_volume */
1619 pa_cvolume_reset(&root_source->soft_volume, root_source->sample_spec.channels);
1620 if (!(root_source->flags & PA_SOURCE_DEFERRED_VOLUME))
1621 root_source->set_volume(root_source);
1624 /* If we have no function set_volume(), then the soft volume
1625 * becomes the real volume */
1626 root_source->soft_volume = root_source->real_volume;
1628 /* This tells the source that soft volume and/or real volume changed */
1630 pa_assert_se(pa_asyncmsgq_send(root_source->asyncmsgq, PA_MSGOBJECT(root_source), PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
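/* Illustrative summary of the three volumes handled above: reference_volume is what
 * clients see and set, real_volume is what is requested from the device (in flat volume
 * mode it tracks the loudest output), and soft_volume is the software factor still to be
 * applied to the samples: reset to 0 dB when a set_volume() callback exists, or set equal
 * to real_volume when everything has to be done in software. */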
1633 /* Called from the IO thread if deferred volume is used, otherwise from the main thread.
1634 * Only to be called by the source implementor */
1635 void pa_source_set_soft_volume(pa_source *s, const pa_cvolume *volume) {
1637 pa_source_assert_ref(s);
1638 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1640 if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
1641 pa_source_assert_io_context(s);
1643 pa_assert_ctl_context();
1646 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1648 s->soft_volume = *volume;
1650 if (PA_SOURCE_IS_LINKED(s->state) && !(s->flags & PA_SOURCE_DEFERRED_VOLUME))
1651 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1653 s->thread_info.soft_volume = s->soft_volume;
1656 /* Called from the main thread. Only called for the root source in volume sharing
1657 * cases, except for internal recursive calls. */
1658 static void propagate_real_volume(pa_source *s, const pa_cvolume *old_real_volume) {
1659 pa_source_output *o;
1662 pa_source_assert_ref(s);
1663 pa_assert(old_real_volume);
1664 pa_assert_ctl_context();
1665 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1667 /* This is called when the hardware's real volume changes due to
1668 * some external event. We copy the real volume into our
1669 * reference volume and then rebuild the stream volumes based on
1670 * o->real_ratio which should stay fixed. */
1672 if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1673 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1676 /* 1. Make the real volume the reference volume */
1677 update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
1680 if (pa_source_flat_volume_enabled(s)) {
1682 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1683 pa_cvolume old_volume = o->volume;
1685 /* 2. Since the source's reference and real volumes are equal
1686 * now our ratios should be too. */
1687 o->reference_ratio = o->real_ratio;
1689 /* 3. Recalculate the new stream reference volume based on the
1690 * reference ratio and the source's reference volume.
1692 * This basically calculates:
1694 * o->volume = s->reference_volume * o->reference_ratio
1696 * This is identical to propagate_reference_volume() */
1697 o->volume = s->reference_volume;
1698 pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
1699 pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);
1701 /* Notify if something changed */
1702 if (!pa_cvolume_equal(&old_volume, &o->volume)) {
1704 if (o->volume_changed)
1705 o->volume_changed(o);
1707 pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
1710 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1711 propagate_real_volume(o->destination_source, old_real_volume);
1715 /* Something got changed in the hardware. It probably makes sense
1716 * to save changed hw settings given that hw volume changes not
1717 * triggered by PA are almost certainly done by the user. */
1718 if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1719 s->save_volume = TRUE;
1722 /* Called from io thread */
1723 void pa_source_update_volume_and_mute(pa_source *s) {
1725 pa_source_assert_io_context(s);
1727 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
1730 /* Called from main thread */
1731 const pa_cvolume *pa_source_get_volume(pa_source *s, pa_bool_t force_refresh) {
1732 pa_source_assert_ref(s);
1733 pa_assert_ctl_context();
1734 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1736 if (s->refresh_volume || force_refresh) {
1737 struct pa_cvolume old_real_volume;
1739 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1741 old_real_volume = s->real_volume;
1743 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume)
1746 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1748 update_real_volume(s, &s->real_volume, &s->channel_map);
1749 propagate_real_volume(s, &old_real_volume);
1752 return &s->reference_volume;
1755 /* Called from main thread. In volume sharing cases, only the root source may
1757 void pa_source_volume_changed(pa_source *s, const pa_cvolume *new_real_volume) {
1758 pa_cvolume old_real_volume;
1760 pa_source_assert_ref(s);
1761 pa_assert_ctl_context();
1762 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1763 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1765 /* The source implementor may call this if the volume changed to make sure everyone is notified */
1767 old_real_volume = s->real_volume;
1768 update_real_volume(s, new_real_volume, &s->channel_map);
1769 propagate_real_volume(s, &old_real_volume);
1772 /* Called from main thread */
1773 void pa_source_set_mute(pa_source *s, pa_bool_t mute, pa_bool_t save) {
1774 pa_bool_t old_muted;
1776 pa_source_assert_ref(s);
1777 pa_assert_ctl_context();
1778 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1780 old_muted = s->muted;
1782 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1784 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->set_mute)
1787 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1789 if (old_muted != s->muted)
1790 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1793 /* Called from main thread */
1794 pa_bool_t pa_source_get_mute(pa_source *s, pa_bool_t force_refresh) {
1796 pa_source_assert_ref(s);
1797 pa_assert_ctl_context();
1798 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1800 if (s->refresh_muted || force_refresh) {
1801 pa_bool_t old_muted = s->muted;
1803 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_mute)
1806 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1808 if (old_muted != s->muted) {
1809 s->save_muted = TRUE;
1811 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1813 /* Make sure the soft mute status stays in sync */
1814 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1821 /* Called from main thread */
1822 void pa_source_mute_changed(pa_source *s, pa_bool_t new_muted) {
1823 pa_source_assert_ref(s);
1824 pa_assert_ctl_context();
1825 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1827 /* The source implementor may call this if the mute state changed to make sure everyone is notified */
1829 if (s->muted == new_muted)
1832 s->muted = new_muted;
1833 s->save_muted = TRUE;
1835 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1838 /* Called from main thread */
1839 pa_bool_t pa_source_update_proplist(pa_source *s, pa_update_mode_t mode, pa_proplist *p) {
1840 pa_source_assert_ref(s);
1841 pa_assert_ctl_context();
1844 pa_proplist_update(s->proplist, mode, p);
1846 if (PA_SOURCE_IS_LINKED(s->state)) {
1847 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1848 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1854 /* Called from main thread */
1855 /* FIXME -- this should be dropped and merged into pa_source_update_proplist() */
1856 void pa_source_set_description(pa_source *s, const char *description) {
1858 pa_source_assert_ref(s);
1859 pa_assert_ctl_context();
1861 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1864 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1866 if (old && description && pa_streq(old, description))
1870 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1872 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1874 if (PA_SOURCE_IS_LINKED(s->state)) {
1875 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1876 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1880 /* Called from main thread */
1881 unsigned pa_source_linked_by(pa_source *s) {
1882 pa_source_assert_ref(s);
1883 pa_assert_ctl_context();
1884 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1886 return pa_idxset_size(s->outputs);
1889 /* Called from main thread */
1890 unsigned pa_source_used_by(pa_source *s) {
1893 pa_source_assert_ref(s);
1894 pa_assert_ctl_context();
1895 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1897 ret = pa_idxset_size(s->outputs);
1898 pa_assert(ret >= s->n_corked);
1900 return ret - s->n_corked;
1903 /* Called from main thread */
1904 unsigned pa_source_check_suspend(pa_source *s) {
1906 pa_source_output *o;
1909 pa_source_assert_ref(s);
1910 pa_assert_ctl_context();
1912 if (!PA_SOURCE_IS_LINKED(s->state))
1917 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1918 pa_source_output_state_t st;
1920 st = pa_source_output_get_state(o);
1922 /* We do not assert here. It is perfectly valid for a source output to
1923 * be in the INIT state (i.e. created, marked done but not yet put)
1924 * and we should not care if it's unlinked as it won't contribute
1925 * towards our busy status.
1927 if (!PA_SOURCE_OUTPUT_IS_LINKED(st))
1930 if (st == PA_SOURCE_OUTPUT_CORKED)
1933 if (o->flags & PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND)
1942 /* Called from the IO thread */
1943 static void sync_output_volumes_within_thread(pa_source *s) {
1944 pa_source_output *o;
1947 pa_source_assert_ref(s);
1948 pa_source_assert_io_context(s);
1950 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
1951 if (pa_cvolume_equal(&o->thread_info.soft_volume, &o->soft_volume))
1954 o->thread_info.soft_volume = o->soft_volume;
1955 //pa_source_output_request_rewind(o, 0, TRUE, FALSE, FALSE);
1959 /* Called from the IO thread. Only called for the root source in volume sharing
1960 * cases, except for internal recursive calls. */
1961 static void set_shared_volume_within_thread(pa_source *s) {
1962 pa_source_output *o;
1965 pa_source_assert_ref(s);
1967 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
1969 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
1970 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1971 set_shared_volume_within_thread(o->destination_source);
1975 /* Called from the IO thread, except for the messages that are explicitly handled in the main thread (e.g. PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE below) */
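/* These messages are normally posted by the main thread with
 * pa_asyncmsgq_send() (see the wrappers throughout this file) and executed
 * here in the source's IO thread. */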
1976 int pa_source_process_msg(pa_msgobject *object, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1977 pa_source *s = PA_SOURCE(object);
1978 pa_source_assert_ref(s);
1980 switch ((pa_source_message_t) code) {
1982 case PA_SOURCE_MESSAGE_ADD_OUTPUT: {
1983 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
1985 pa_hashmap_put(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index), pa_source_output_ref(o));
1987 if (o->direct_on_input) {
1988 o->thread_info.direct_on_input = o->direct_on_input;
1989 pa_hashmap_put(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index), o);
1992 pa_assert(!o->thread_info.attached);
1993 o->thread_info.attached = TRUE;
1998 pa_source_output_set_state_within_thread(o, o->state);
2000 if (o->thread_info.requested_source_latency != (pa_usec_t) -1)
2001 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
2003 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2005 /* We don't just invalidate the requested latency here,
2006 * because if we are in a move we might need to fix up the
2007 * requested latency. */
2008 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
2010 /* In flat volume mode we need to update the volume as well */
2012 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2015 case PA_SOURCE_MESSAGE_REMOVE_OUTPUT: {
2016 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
2018 pa_source_output_set_state_within_thread(o, o->state);
2023 pa_assert(o->thread_info.attached);
2024 o->thread_info.attached = FALSE;
2026 if (o->thread_info.direct_on_input) {
2027 pa_hashmap_remove(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index));
2028 o->thread_info.direct_on_input = NULL;
2031 if (pa_hashmap_remove(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index)))
2032 pa_source_output_unref(o);
2034 pa_source_invalidate_requested_latency(s, TRUE);
2036 /* In flat volume mode we need to update the volume as well */
2038 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2041 case PA_SOURCE_MESSAGE_SET_SHARED_VOLUME: {
2042 pa_source *root_source = pa_source_get_master(s);
2044 if (PA_LIKELY(root_source))
2045 set_shared_volume_within_thread(root_source);
2050 case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:
2052 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
2054 pa_source_volume_change_push(s);
2056 /* Fall through ... */
2058 case PA_SOURCE_MESSAGE_SET_VOLUME:
2060 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2061 s->thread_info.soft_volume = s->soft_volume;
2064 /* Fall through ... */
2066 case PA_SOURCE_MESSAGE_SYNC_VOLUMES:
2067 sync_output_volumes_within_thread(s);
2070 case PA_SOURCE_MESSAGE_GET_VOLUME:
2072 if ((s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume) {
2074 pa_source_volume_change_flush(s);
2075 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2078 /* In case the source implementor reset the SW volume. */
2079 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2080 s->thread_info.soft_volume = s->soft_volume;
2085 case PA_SOURCE_MESSAGE_SET_MUTE:
2087 if (s->thread_info.soft_muted != s->muted) {
2088 s->thread_info.soft_muted = s->muted;
2091 if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->set_mute)
2096 case PA_SOURCE_MESSAGE_GET_MUTE:
2098 if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->get_mute)
2103 case PA_SOURCE_MESSAGE_SET_STATE: {
2105 pa_bool_t suspend_change =
2106 (s->thread_info.state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2107 (PA_SOURCE_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SOURCE_SUSPENDED);
2109 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2111 if (suspend_change) {
2112 pa_source_output *o;
2115 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2116 if (o->suspend_within_thread)
2117 o->suspend_within_thread(o, s->thread_info.state == PA_SOURCE_SUSPENDED);
2123 case PA_SOURCE_MESSAGE_DETACH:
2125 /* Detach all streams */
2126 pa_source_detach_within_thread(s);
2129 case PA_SOURCE_MESSAGE_ATTACH:
2131 /* Reattach all streams */
2132 pa_source_attach_within_thread(s);
2135 case PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY: {
2137 pa_usec_t *usec = userdata;
2138 *usec = pa_source_get_requested_latency_within_thread(s);
2140 /* Yes, that's right: the IO thread will see (pa_usec_t) -1 when no
2141 * explicit requested latency is configured, while the main
2142 * thread will see max_latency */
2143 if (*usec == (pa_usec_t) -1)
2144 *usec = s->thread_info.max_latency;
2149 case PA_SOURCE_MESSAGE_SET_LATENCY_RANGE: {
2150 pa_usec_t *r = userdata;
2152 pa_source_set_latency_range_within_thread(s, r[0], r[1]);
2157 case PA_SOURCE_MESSAGE_GET_LATENCY_RANGE: {
2158 pa_usec_t *r = userdata;
2160 r[0] = s->thread_info.min_latency;
2161 r[1] = s->thread_info.max_latency;
2166 case PA_SOURCE_MESSAGE_GET_FIXED_LATENCY:
2168 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2171 case PA_SOURCE_MESSAGE_SET_FIXED_LATENCY:
2173 pa_source_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2176 case PA_SOURCE_MESSAGE_GET_MAX_REWIND:
2178 *((size_t*) userdata) = s->thread_info.max_rewind;
2181 case PA_SOURCE_MESSAGE_SET_MAX_REWIND:
2183 pa_source_set_max_rewind_within_thread(s, (size_t) offset);
2186 case PA_SOURCE_MESSAGE_GET_LATENCY:
2188 if (s->monitor_of) {
2189 *((pa_usec_t*) userdata) = 0;
2193 /* Implementors need to override this implementation! */
2196 case PA_SOURCE_MESSAGE_SET_PORT:
2198 pa_assert(userdata);
2200 struct source_message_set_port *msg_data = userdata;
2201 msg_data->ret = s->set_port(s, msg_data->port);
2205 case PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2206 /* This message is sent from the IO thread and handled in the main thread. */
2207 pa_assert_ctl_context();
2209 /* Make sure we're not messing with the main thread when no longer linked */
2210 if (!PA_SOURCE_IS_LINKED(s->state))
2213 pa_source_get_volume(s, TRUE);
2214 pa_source_get_mute(s, TRUE);
2217 case PA_SOURCE_MESSAGE_SET_LATENCY_OFFSET:
2218 s->thread_info.latency_offset = offset;
2221 case PA_SOURCE_MESSAGE_MAX:
2228 /* Called from main thread */
2229 int pa_source_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2234 pa_core_assert_ref(c);
2235 pa_assert_ctl_context();
2236 pa_assert(cause != 0);
2238 for (source = PA_SOURCE(pa_idxset_first(c->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(c->sources, &idx))) {
2241 if (source->monitor_of)
2244 if ((r = pa_source_suspend(source, suspend, cause)) < 0)
2251 /* Called from main thread */
2252 void pa_source_detach(pa_source *s) {
2253 pa_source_assert_ref(s);
2254 pa_assert_ctl_context();
2255 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2257 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2260 /* Called from main thread */
2261 void pa_source_attach(pa_source *s) {
2262 pa_source_assert_ref(s);
2263 pa_assert_ctl_context();
2264 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2266 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2269 /* Called from IO thread */
2270 void pa_source_detach_within_thread(pa_source *s) {
2271 pa_source_output *o;
2274 pa_source_assert_ref(s);
2275 pa_source_assert_io_context(s);
2276 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2278 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2283 /* Called from IO thread */
2284 void pa_source_attach_within_thread(pa_source *s) {
2285 pa_source_output *o;
2288 pa_source_assert_ref(s);
2289 pa_source_assert_io_context(s);
2290 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2292 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2297 /* Called from IO thread */
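/* Computes the effective requested latency: for sources without dynamic
 * latency the clamped fixed latency, otherwise the smallest latency requested
 * by any output, clamped to [min_latency, max_latency]. Returns (pa_usec_t) -1
 * if no output requested anything. */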
2298 pa_usec_t pa_source_get_requested_latency_within_thread(pa_source *s) {
2299 pa_usec_t result = (pa_usec_t) -1;
2300 pa_source_output *o;
2303 pa_source_assert_ref(s);
2304 pa_source_assert_io_context(s);
2306 if (!(s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2307 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2309 if (s->thread_info.requested_latency_valid)
2310 return s->thread_info.requested_latency;
2312 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2313 if (o->thread_info.requested_source_latency != (pa_usec_t) -1 &&
2314 (result == (pa_usec_t) -1 || result > o->thread_info.requested_source_latency))
2315 result = o->thread_info.requested_source_latency;
2317 if (result != (pa_usec_t) -1)
2318 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2320 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2321 /* Only cache this if we are fully set up */
2322 s->thread_info.requested_latency = result;
2323 s->thread_info.requested_latency_valid = TRUE;
2329 /* Called from main thread */
2330 pa_usec_t pa_source_get_requested_latency(pa_source *s) {
2333 pa_source_assert_ref(s);
2334 pa_assert_ctl_context();
2335 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2337 if (s->state == PA_SOURCE_SUSPENDED)
2340 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2345 /* Called from IO thread */
2346 void pa_source_set_max_rewind_within_thread(pa_source *s, size_t max_rewind) {
2347 pa_source_output *o;
2350 pa_source_assert_ref(s);
2351 pa_source_assert_io_context(s);
2353 if (max_rewind == s->thread_info.max_rewind)
2356 s->thread_info.max_rewind = max_rewind;
2358 if (PA_SOURCE_IS_LINKED(s->thread_info.state))
2359 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2360 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2363 /* Called from main thread */
2364 void pa_source_set_max_rewind(pa_source *s, size_t max_rewind) {
2365 pa_source_assert_ref(s);
2366 pa_assert_ctl_context();
2368 if (PA_SOURCE_IS_LINKED(s->state))
2369 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2371 pa_source_set_max_rewind_within_thread(s, max_rewind);
2374 /* Called from IO thread */
2375 void pa_source_invalidate_requested_latency(pa_source *s, pa_bool_t dynamic) {
2376 pa_source_output *o;
2379 pa_source_assert_ref(s);
2380 pa_source_assert_io_context(s);
2382 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2383 s->thread_info.requested_latency_valid = FALSE;
2387 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2389 if (s->update_requested_latency)
2390 s->update_requested_latency(s);
2392 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2393 if (o->update_source_requested_latency)
2394 o->update_source_requested_latency(o);
2398 pa_sink_invalidate_requested_latency(s->monitor_of, dynamic);
2401 /* Called from main thread */
2402 void pa_source_set_latency_range(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2403 pa_source_assert_ref(s);
2404 pa_assert_ctl_context();
2406 /* min_latency == 0: no limit
2407 * min_latency anything else: specified limit
2409 * Similar for max_latency */
2411 if (min_latency < ABSOLUTE_MIN_LATENCY)
2412 min_latency = ABSOLUTE_MIN_LATENCY;
2414 if (max_latency <= 0 ||
2415 max_latency > ABSOLUTE_MAX_LATENCY)
2416 max_latency = ABSOLUTE_MAX_LATENCY;
2418 pa_assert(min_latency <= max_latency);
2420 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2421 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2422 max_latency == ABSOLUTE_MAX_LATENCY) ||
2423 (s->flags & PA_SOURCE_DYNAMIC_LATENCY));
2425 if (PA_SOURCE_IS_LINKED(s->state)) {
2431 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2433 pa_source_set_latency_range_within_thread(s, min_latency, max_latency);
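/*
 * Illustrative sketch (not part of the original file): a backend that supports
 * dynamic latency would typically advertise its range once, before
 * pa_source_put(). The variable name and the numbers are made up.
 *
 *     // inside a hypothetical driver's init, with u->source flagged
 *     // PA_SOURCE_DYNAMIC_LATENCY at creation time:
 *     pa_source_set_latency_range(u->source, 0, 2 * PA_USEC_PER_SEC);
 *     // min_latency == 0 means "no limit" and is clamped up to
 *     // ABSOLUTE_MIN_LATENCY; values above ABSOLUTE_MAX_LATENCY are clamped down
 */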
2436 /* Called from main thread */
2437 void pa_source_get_latency_range(pa_source *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2438 pa_source_assert_ref(s);
2439 pa_assert_ctl_context();
2440 pa_assert(min_latency);
2441 pa_assert(max_latency);
2443 if (PA_SOURCE_IS_LINKED(s->state)) {
2444 pa_usec_t r[2] = { 0, 0 };
2446 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2448 *min_latency = r[0];
2449 *max_latency = r[1];
2451 *min_latency = s->thread_info.min_latency;
2452 *max_latency = s->thread_info.max_latency;
2456 /* Called from IO thread, and from main thread before pa_source_put() is called */
2457 void pa_source_set_latency_range_within_thread(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2458 pa_source_assert_ref(s);
2459 pa_source_assert_io_context(s);
2461 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2462 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2463 pa_assert(min_latency <= max_latency);
2465 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2466 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2467 max_latency == ABSOLUTE_MAX_LATENCY) ||
2468 (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ||
2471 if (s->thread_info.min_latency == min_latency &&
2472 s->thread_info.max_latency == max_latency)
2475 s->thread_info.min_latency = min_latency;
2476 s->thread_info.max_latency = max_latency;
2478 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2479 pa_source_output *o;
2482 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2483 if (o->update_source_latency_range)
2484 o->update_source_latency_range(o);
2487 pa_source_invalidate_requested_latency(s, FALSE);
2490 /* Called from main thread, before the source is put */
2491 void pa_source_set_fixed_latency(pa_source *s, pa_usec_t latency) {
2492 pa_source_assert_ref(s);
2493 pa_assert_ctl_context();
2495 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2496 pa_assert(latency == 0);
2500 if (latency < ABSOLUTE_MIN_LATENCY)
2501 latency = ABSOLUTE_MIN_LATENCY;
2503 if (latency > ABSOLUTE_MAX_LATENCY)
2504 latency = ABSOLUTE_MAX_LATENCY;
2506 if (PA_SOURCE_IS_LINKED(s->state))
2507 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2509 s->thread_info.fixed_latency = latency;
2512 /* Called from main thread */
2513 pa_usec_t pa_source_get_fixed_latency(pa_source *s) {
2516 pa_source_assert_ref(s);
2517 pa_assert_ctl_context();
2519 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY)
2522 if (PA_SOURCE_IS_LINKED(s->state))
2523 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2525 latency = s->thread_info.fixed_latency;
2530 /* Called from IO thread */
2531 void pa_source_set_fixed_latency_within_thread(pa_source *s, pa_usec_t latency) {
2532 pa_source_assert_ref(s);
2533 pa_source_assert_io_context(s);
2535 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2536 pa_assert(latency == 0);
2537 s->thread_info.fixed_latency = 0;
2542 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2543 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2545 if (s->thread_info.fixed_latency == latency)
2548 s->thread_info.fixed_latency = latency;
2550 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2551 pa_source_output *o;
2554 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2555 if (o->update_source_fixed_latency)
2556 o->update_source_fixed_latency(o);
2559 pa_source_invalidate_requested_latency(s, FALSE);
2562 /* Called from main thread */
2563 void pa_source_set_latency_offset(pa_source *s, int64_t offset) {
2564 pa_source_assert_ref(s);
2566 s->latency_offset = offset;
2568 if (PA_SOURCE_IS_LINKED(s->state))
2569 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_OFFSET, NULL, offset, NULL) == 0);
2571 s->thread_info.latency_offset = offset;
2574 /* Called from main thread */
2575 size_t pa_source_get_max_rewind(pa_source *s) {
2577 pa_assert_ctl_context();
2578 pa_source_assert_ref(s);
2580 if (!PA_SOURCE_IS_LINKED(s->state))
2581 return s->thread_info.max_rewind;
2583 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2588 /* Called from main context */
2589 int pa_source_set_port(pa_source *s, const char *name, pa_bool_t save) {
2590 pa_device_port *port;
2593 pa_source_assert_ref(s);
2594 pa_assert_ctl_context();
2597 pa_log_debug("set_port() operation not implemented for source %u \"%s\"", s->index, s->name);
2598 return -PA_ERR_NOTIMPLEMENTED;
2602 return -PA_ERR_NOENTITY;
2604 if (!(port = pa_hashmap_get(s->ports, name)))
2605 return -PA_ERR_NOENTITY;
2607 if (s->active_port == port) {
2608 s->save_port = s->save_port || save;
2612 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
2613 struct source_message_set_port msg = { .port = port, .ret = 0 };
2614 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
2618 ret = s->set_port(s, port);
2621 return -PA_ERR_NOENTITY;
2623 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2625 pa_log_info("Changed port of source %u \"%s\" to %s", s->index, s->name, port->name);
2627 s->active_port = port;
2628 s->save_port = save;
2630 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_CHANGED], s);
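/* --- Deferred hardware volume changes (PA_SOURCE_DEFERRED_VOLUME) ---
 * The helpers below keep a timestamped queue of hardware volume updates in the
 * IO thread; the target time is derived from the current source latency in
 * pa_source_volume_change_push(), so the update is written when it lines up
 * with the stream rather than immediately. */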
2635 PA_STATIC_FLIST_DECLARE(pa_source_volume_change, 0, pa_xfree);
2637 /* Called from the IO thread. */
2638 static pa_source_volume_change *pa_source_volume_change_new(pa_source *s) {
2639 pa_source_volume_change *c;
2640 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_source_volume_change))))
2641 c = pa_xnew(pa_source_volume_change, 1);
2643 PA_LLIST_INIT(pa_source_volume_change, c);
2645 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
2649 /* Called from the IO thread. */
2650 static void pa_source_volume_change_free(pa_source_volume_change *c) {
2652 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_source_volume_change), c) < 0)
2656 /* Called from the IO thread. */
2657 void pa_source_volume_change_push(pa_source *s) {
2658 pa_source_volume_change *c = NULL;
2659 pa_source_volume_change *nc = NULL;
2660 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
2662 const char *direction = NULL;
2665 nc = pa_source_volume_change_new(s);
2667 /* NOTE: There are already more different volumes in pa_source than I can remember.
2668 * Adding one more volume for HW would let us get rid of this, but I am trying
2669 * to survive with the ones we already have. */
2670 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
2672 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
2673 pa_log_debug("Volume not changing");
2674 pa_source_volume_change_free(nc);
2678 nc->at = pa_source_get_latency_within_thread(s);
2679 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
2681 if (s->thread_info.volume_changes_tail) {
2682 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
2683 /* If the volume is going up, let's do it a bit late. If it is going
2684 * down, let's do it a bit early (see the worked example after this function). */
2685 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
2686 if (nc->at + safety_margin > c->at) {
2687 nc->at += safety_margin;
2692 else if (nc->at - safety_margin > c->at) {
2693 nc->at -= safety_margin;
2701 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
2702 nc->at += safety_margin;
2705 nc->at -= safety_margin;
2708 PA_LLIST_PREPEND(pa_source_volume_change, s->thread_info.volume_changes, nc);
2711 PA_LLIST_INSERT_AFTER(pa_source_volume_change, s->thread_info.volume_changes, c, nc);
2714 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
2716 /* We can ignore volume events that came earlier but should happen later than this. */
2717 PA_LLIST_FOREACH(c, nc->next) {
2718 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
2719 pa_source_volume_change_free(c);
2722 s->thread_info.volume_changes_tail = nc;
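/* Worked example (numbers invented): with safety_margin = 8000 usec and a
 * change already queued for t = 105000, a new *louder* change computed for
 * t = 100000 is postponed to t = 108000 (volume goes up a bit late), while a
 * new *quieter* change computed for t = 120000 is advanced to t = 112000
 * (volume goes down a bit early). */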
2725 /* Called from the IO thread. */
2726 static void pa_source_volume_change_flush(pa_source *s) {
2727 pa_source_volume_change *c = s->thread_info.volume_changes;
2729 s->thread_info.volume_changes = NULL;
2730 s->thread_info.volume_changes_tail = NULL;
2732 pa_source_volume_change *next = c->next;
2733 pa_source_volume_change_free(c);
2738 /* Called from the IO thread. */
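/* Walks the queue of deferred hardware volume changes and applies every entry
 * whose scheduled time has passed, updating thread_info.current_hw_volume.
 * Reports the time until the next still-pending change through *usec_to_next
 * (0 if the queue is now empty) and returns TRUE if anything was applied. */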
2739 pa_bool_t pa_source_volume_change_apply(pa_source *s, pa_usec_t *usec_to_next) {
2741 pa_bool_t ret = FALSE;
2745 if (!s->thread_info.volume_changes || !PA_SOURCE_IS_LINKED(s->state)) {
2751 pa_assert(s->write_volume);
2753 now = pa_rtclock_now();
2755 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
2756 pa_source_volume_change *c = s->thread_info.volume_changes;
2757 PA_LLIST_REMOVE(pa_source_volume_change, s->thread_info.volume_changes, c);
2758 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
2759 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
2761 s->thread_info.current_hw_volume = c->hw_volume;
2762 pa_source_volume_change_free(c);
2768 if (s->thread_info.volume_changes) {
2770 *usec_to_next = s->thread_info.volume_changes->at - now;
2771 if (pa_log_ratelimit(PA_LOG_DEBUG))
2772 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
2777 s->thread_info.volume_changes_tail = NULL;
2783 /* Called from the main thread */
2784 /* Gets the list of formats supported by the source. The members and idxset must
2785 * be freed by the caller. */
2786 pa_idxset* pa_source_get_formats(pa_source *s) {
2791 if (s->get_formats) {
2792 /* Source supports format query, all is good */
2793 ret = s->get_formats(s);
2795 /* Source doesn't support format query, so assume it does PCM */
2796 pa_format_info *f = pa_format_info_new();
2797 f->encoding = PA_ENCODING_PCM;
2799 ret = pa_idxset_new(NULL, NULL);
2800 pa_idxset_put(ret, f, NULL);
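/*
 * Illustrative sketch (not part of the original file): the caller owns the
 * returned idxset and its members, matching the free call used by
 * pa_source_check_format() below.
 *
 *     pa_idxset *formats = pa_source_get_formats(s);
 *     pa_format_info *f;
 *     uint32_t i;
 *
 *     PA_IDXSET_FOREACH(f, formats, i)
 *         pa_log_debug("source supports encoding %d", (int) f->encoding);
 *
 *     pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
 */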
2806 /* Called from the main thread */
2807 /* Checks if the source can accept this format */
2808 pa_bool_t pa_source_check_format(pa_source *s, pa_format_info *f) {
2809 pa_idxset *formats = NULL;
2810 pa_bool_t ret = FALSE;
2815 formats = pa_source_get_formats(s);
2818 pa_format_info *finfo_device;
2821 PA_IDXSET_FOREACH(finfo_device, formats, i) {
2822 if (pa_format_info_is_compatible(finfo_device, f)) {
2828 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
2834 /* Called from the main thread */
2835 /* Calculates the intersection between formats supported by the source and
2836 * in_formats, and returns these, in the order of the source's formats. */
2837 pa_idxset* pa_source_check_formats(pa_source *s, pa_idxset *in_formats) {
2838 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *source_formats = NULL;
2839 pa_format_info *f_source, *f_in;
2844 if (!in_formats || pa_idxset_isempty(in_formats))
2847 source_formats = pa_source_get_formats(s);
2849 PA_IDXSET_FOREACH(f_source, source_formats, i) {
2850 PA_IDXSET_FOREACH(f_in, in_formats, j) {
2851 if (pa_format_info_is_compatible(f_source, f_in))
2852 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
2858 pa_idxset_free(source_formats, (pa_free_cb_t) pa_format_info_free);