2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
28 #include <pulse/format.h>
29 #include <pulse/utf8.h>
30 #include <pulse/xmalloc.h>
31 #include <pulse/timeval.h>
32 #include <pulse/util.h>
33 #include <pulse/rtclock.h>
34 #include <pulse/internal.h>
36 #include <pulsecore/core-util.h>
37 #include <pulsecore/source-output.h>
38 #include <pulsecore/namereg.h>
39 #include <pulsecore/core-subscribe.h>
40 #include <pulsecore/log.h>
41 #include <pulsecore/mix.h>
42 #include <pulsecore/flist.h>
46 #define ABSOLUTE_MIN_LATENCY (500)
47 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
48 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
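/* Note: the three latency limits above are expressed in microseconds, i.e. an
 * absolute floor of 500 us, an absolute ceiling of 10 s and a default fixed
 * latency of 250 ms. */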
50 PA_DEFINE_PUBLIC_CLASS(pa_source, pa_msgobject);
52 struct pa_source_volume_change {
56 PA_LLIST_FIELDS(pa_source_volume_change);
59 struct source_message_set_port {
64 static void source_free(pa_object *o);
66 static void pa_source_volume_change_push(pa_source *s);
67 static void pa_source_volume_change_flush(pa_source *s);
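/* A rough sketch of how the deferred ("sync") volume machinery declared above
 * fits together, inferred from the fields used later in this file: when a
 * source sets PA_SOURCE_DEFERRED_VOLUME, hardware volume writes are not issued
 * directly from the main thread. Instead pa_source_volume_change_push() queues
 * timestamped pa_source_volume_change entries on s->thread_info.volume_changes,
 * and the IO thread later applies them via the write_volume callback, taking
 * volume_change_safety_margin and volume_change_extra_delay into account.
 * pa_source_volume_change_flush() drops any still-pending entries, e.g. when
 * the source is freed. */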
69 pa_source_new_data* pa_source_new_data_init(pa_source_new_data *data) {
73 data->proplist = pa_proplist_new();
74 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
79 void pa_source_new_data_set_name(pa_source_new_data *data, const char *name) {
83 data->name = pa_xstrdup(name);
86 void pa_source_new_data_set_sample_spec(pa_source_new_data *data, const pa_sample_spec *spec) {
89 if ((data->sample_spec_is_set = !!spec))
90 data->sample_spec = *spec;
93 void pa_source_new_data_set_channel_map(pa_source_new_data *data, const pa_channel_map *map) {
96 if ((data->channel_map_is_set = !!map))
97 data->channel_map = *map;
100 void pa_source_new_data_set_alternate_sample_rate(pa_source_new_data *data, const uint32_t alternate_sample_rate) {
103 data->alternate_sample_rate_is_set = true;
104 data->alternate_sample_rate = alternate_sample_rate;
107 void pa_source_new_data_set_volume(pa_source_new_data *data, const pa_cvolume *volume) {
110 if ((data->volume_is_set = !!volume))
111 data->volume = *volume;
114 void pa_source_new_data_set_muted(pa_source_new_data *data, bool mute) {
117 data->muted_is_set = true;
121 void pa_source_new_data_set_port(pa_source_new_data *data, const char *port) {
124 pa_xfree(data->active_port);
125 data->active_port = pa_xstrdup(port);
128 void pa_source_new_data_done(pa_source_new_data *data) {
131 pa_proplist_free(data->proplist);
134 pa_hashmap_free(data->ports);
136 pa_xfree(data->name);
137 pa_xfree(data->active_port);
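/* A minimal, purely illustrative sketch (not taken from any particular driver)
 * of how a module is expected to use the pa_source_new_data helpers above
 * together with pa_source_new() and pa_source_put(); the module "m", the
 * thread_mq/rtpoll objects and the chosen flags are assumptions:
 *
 *     pa_source_new_data data;
 *     pa_source *s;
 *     pa_sample_spec ss = { .format = PA_SAMPLE_S16LE, .rate = 44100, .channels = 2 };
 *
 *     pa_source_new_data_init(&data);
 *     data.driver = __FILE__;
 *     data.module = m;
 *     pa_source_new_data_set_name(&data, "my_source");
 *     pa_source_new_data_set_sample_spec(&data, &ss);
 *     s = pa_source_new(m->core, &data, PA_SOURCE_LATENCY);
 *     pa_source_new_data_done(&data);
 *     if (!s)
 *         return -1;
 *     pa_source_set_asyncmsgq(s, thread_mq.inq);
 *     pa_source_set_rtpoll(s, rtpoll);
 *     pa_source_put(s);
 */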
140 /* Called from main context */
141 static void reset_callbacks(pa_source *s) {
145 s->get_volume = NULL;
146 s->set_volume = NULL;
147 s->write_volume = NULL;
150 s->update_requested_latency = NULL;
152 s->get_formats = NULL;
153 s->reconfigure = NULL;
156 /* Called from main context */
157 pa_source* pa_source_new(
159 pa_source_new_data *data,
160 pa_source_flags_t flags) {
164 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
169 pa_assert(data->name);
170 pa_assert_ctl_context();
172 s = pa_msgobject_new(pa_source);
174 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SOURCE, s, data->namereg_fail))) {
175 pa_log_debug("Failed to register name %s.", data->name);
180 pa_source_new_data_set_name(data, name);
182 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_NEW], data) < 0) {
184 pa_namereg_unregister(core, name);
188 /* FIXME, need to free s here on failure */
190 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
191 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
193 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
195 if (!data->channel_map_is_set)
196 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
198 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
199 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
201 /* FIXME: There should probably be a general function for checking whether
202 * the source volume is allowed to be set, like there is for source outputs. */
203 pa_assert(!data->volume_is_set || !(flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
205 if (!data->volume_is_set) {
206 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
207 data->save_volume = false;
210 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
211 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
213 if (!data->muted_is_set)
217 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
219 pa_device_init_description(data->proplist, data->card);
220 pa_device_init_icon(data->proplist, false);
221 pa_device_init_intended_roles(data->proplist);
223 if (!data->active_port) {
224 pa_device_port *p = pa_device_port_find_best(data->ports);
226 pa_source_new_data_set_port(data, p->name);
229 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_FIXATE], data) < 0) {
231 pa_namereg_unregister(core, name);
235 s->parent.parent.free = source_free;
236 s->parent.process_msg = pa_source_process_msg;
239 s->state = PA_SOURCE_INIT;
242 s->suspend_cause = data->suspend_cause;
243 pa_source_set_mixer_dirty(s, false);
244 s->name = pa_xstrdup(name);
245 s->proplist = pa_proplist_copy(data->proplist);
246 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
247 s->module = data->module;
248 s->card = data->card;
250 s->priority = pa_device_init_priority(s->proplist);
252 s->sample_spec = data->sample_spec;
253 s->channel_map = data->channel_map;
254 s->default_sample_rate = s->sample_spec.rate;
256 if (data->alternate_sample_rate_is_set)
257 s->alternate_sample_rate = data->alternate_sample_rate;
259 s->alternate_sample_rate = s->core->alternate_sample_rate;
261 if (s->sample_spec.rate == s->alternate_sample_rate) {
262 pa_log_warn("Default and alternate sample rates are the same.");
263 s->alternate_sample_rate = 0;
266 s->outputs = pa_idxset_new(NULL, NULL);
268 s->monitor_of = NULL;
269 s->output_from_master = NULL;
271 s->reference_volume = s->real_volume = data->volume;
272 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
273 s->base_volume = PA_VOLUME_NORM;
274 s->n_volume_steps = PA_VOLUME_NORM+1;
275 s->muted = data->muted;
276 s->refresh_volume = s->refresh_muted = false;
283 /* As a minor optimization we just steal the list instead of copying it */
285 s->ports = data->ports;
288 s->active_port = NULL;
289 s->save_port = false;
291 if (data->active_port)
292 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
293 s->save_port = data->save_port;
295 /* Hopefully the active port has already been assigned in the previous call
296 to pa_device_port_find_best, but better safe than sorry */
298 s->active_port = pa_device_port_find_best(s->ports);
301 s->port_latency_offset = s->active_port->latency_offset;
303 s->port_latency_offset = 0;
305 s->save_volume = data->save_volume;
306 s->save_muted = data->save_muted;
308 pa_silence_memchunk_get(
309 &core->silence_cache,
315 s->thread_info.rtpoll = NULL;
316 s->thread_info.outputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
317 (pa_free_cb_t) pa_source_output_unref);
318 s->thread_info.soft_volume = s->soft_volume;
319 s->thread_info.soft_muted = s->muted;
320 s->thread_info.state = s->state;
321 s->thread_info.max_rewind = 0;
322 s->thread_info.requested_latency_valid = false;
323 s->thread_info.requested_latency = 0;
324 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
325 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
326 s->thread_info.fixed_latency = flags & PA_SOURCE_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
328 PA_LLIST_HEAD_INIT(pa_source_volume_change, s->thread_info.volume_changes);
329 s->thread_info.volume_changes_tail = NULL;
330 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
331 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
332 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
333 s->thread_info.port_latency_offset = s->port_latency_offset;
335 /* FIXME: This should probably be moved to pa_source_put() */
336 pa_assert_se(pa_idxset_put(core->sources, s, &s->index) >= 0);
339 pa_assert_se(pa_idxset_put(s->card->sources, s, NULL) >= 0);
341 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
342 pa_log_info("Created source %u \"%s\" with sample spec %s and channel map %s\n %s",
345 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
346 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
353 /* Called from main context */
354 static int source_set_state(pa_source *s, pa_source_state_t state) {
357 pa_source_state_t original_state;
360 pa_assert_ctl_context();
362 if (s->state == state)
365 original_state = s->state;
368 (original_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(state)) ||
369 (PA_SOURCE_IS_OPENED(original_state) && state == PA_SOURCE_SUSPENDED);
372 if ((ret = s->set_state(s, state)) < 0)
376 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
379 s->set_state(s, original_state);
386 if (state != PA_SOURCE_UNLINKED) { /* if we enter UNLINKED state pa_source_unlink() will fire the appropriate events */
387 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_STATE_CHANGED], s);
388 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
391 if (suspend_change) {
395 /* We're suspending or resuming, tell everyone about it */
397 PA_IDXSET_FOREACH(o, s->outputs, idx)
398 if (s->state == PA_SOURCE_SUSPENDED &&
399 (o->flags & PA_SOURCE_OUTPUT_KILL_ON_SUSPEND))
400 pa_source_output_kill(o);
402 o->suspend(o, state == PA_SOURCE_SUSPENDED);
408 void pa_source_set_get_volume_callback(pa_source *s, pa_source_cb_t cb) {
414 void pa_source_set_set_volume_callback(pa_source *s, pa_source_cb_t cb) {
415 pa_source_flags_t flags;
418 pa_assert(!s->write_volume || cb);
422 /* Save the current flags so we can tell if they've changed */
426 /* The source implementor is responsible for setting decibel volume support */
427 s->flags |= PA_SOURCE_HW_VOLUME_CTRL;
429 s->flags &= ~PA_SOURCE_HW_VOLUME_CTRL;
430 /* See note below in pa_source_put() about volume sharing and decibel volumes */
431 pa_source_enable_decibel_volume(s, !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
434 /* If the flags have changed after init, let any clients know via a change event */
435 if (s->state != PA_SOURCE_INIT && flags != s->flags)
436 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
439 void pa_source_set_write_volume_callback(pa_source *s, pa_source_cb_t cb) {
440 pa_source_flags_t flags;
443 pa_assert(!cb || s->set_volume);
445 s->write_volume = cb;
447 /* Save the current flags so we can tell if they've changed */
451 s->flags |= PA_SOURCE_DEFERRED_VOLUME;
453 s->flags &= ~PA_SOURCE_DEFERRED_VOLUME;
455 /* If the flags have changed after init, let any clients know via a change event */
456 if (s->state != PA_SOURCE_INIT && flags != s->flags)
457 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
460 void pa_source_set_get_mute_callback(pa_source *s, pa_source_get_mute_cb_t cb) {
466 void pa_source_set_set_mute_callback(pa_source *s, pa_source_cb_t cb) {
467 pa_source_flags_t flags;
473 /* Save the current flags so we can tell if they've changed */
477 s->flags |= PA_SOURCE_HW_MUTE_CTRL;
479 s->flags &= ~PA_SOURCE_HW_MUTE_CTRL;
481 /* If the flags have changed after init, let any clients know via a change event */
482 if (s->state != PA_SOURCE_INIT && flags != s->flags)
483 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
486 static void enable_flat_volume(pa_source *s, bool enable) {
487 pa_source_flags_t flags;
491 /* Always follow the overall user preference here */
492 enable = enable && s->core->flat_volumes;
494 /* Save the current flags so we can tell if they've changed */
498 s->flags |= PA_SOURCE_FLAT_VOLUME;
500 s->flags &= ~PA_SOURCE_FLAT_VOLUME;
502 /* If the flags have changed after init, let any clients know via a change event */
503 if (s->state != PA_SOURCE_INIT && flags != s->flags)
504 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
507 void pa_source_enable_decibel_volume(pa_source *s, bool enable) {
508 pa_source_flags_t flags;
512 /* Save the current flags so we can tell if they've changed */
516 s->flags |= PA_SOURCE_DECIBEL_VOLUME;
517 enable_flat_volume(s, true);
519 s->flags &= ~PA_SOURCE_DECIBEL_VOLUME;
520 enable_flat_volume(s, false);
523 /* If the flags have changed after init, let any clients know via a change event */
524 if (s->state != PA_SOURCE_INIT && flags != s->flags)
525 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
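/* Taken together, the setters above keep the volume and mute related flags
 * consistent with the callbacks that were actually provided: a set_volume
 * callback implies PA_SOURCE_HW_VOLUME_CTRL, a write_volume callback implies
 * PA_SOURCE_DEFERRED_VOLUME, a set_mute callback implies PA_SOURCE_HW_MUTE_CTRL,
 * and enabling decibel volume also enables flat volume (subject to the
 * core-wide flat_volumes preference). */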
528 /* Called from main context */
529 void pa_source_put(pa_source *s) {
530 pa_source_assert_ref(s);
531 pa_assert_ctl_context();
533 pa_assert(s->state == PA_SOURCE_INIT);
534 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || pa_source_is_filter(s));
536 /* The following fields must be initialized properly when calling _put() */
537 pa_assert(s->asyncmsgq);
538 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
540 /* Generally, flags should be initialized via pa_source_new(). As a
541 * special exception we allow some volume related flags to be set
542 * between _new() and _put() by the callback setter functions above.
544 * Thus we implement a couple of safeguards here which ensure the above
545 * setters were used (or at least the implementor made manual changes
546 * in a compatible way).
548 * Note: All of these flags set here can change over the life time of the source. */
550 pa_assert(!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) || s->set_volume);
551 pa_assert(!(s->flags & PA_SOURCE_DEFERRED_VOLUME) || s->write_volume);
552 pa_assert(!(s->flags & PA_SOURCE_HW_MUTE_CTRL) || s->set_mute);
554 /* XXX: Currently decibel volume is disabled for all sources that use volume
555 * sharing. When the master source supports decibel volume, it would be good
556 * to have the flag also in the filter source, but currently we don't do that
557 * so that the flags of the filter source never change when it's moved from
558 * a master source to another. One solution for this problem would be to
559 * remove user-visible volume altogether from filter sources when volume
560 * sharing is used, but the current approach was easier to implement... */
561 /* We always support decibel volumes in software, otherwise we leave it to
562 * the source implementor to set this flag as needed.
564 * Note: This flag can also change over the life time of the source. */
565 if (!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
566 pa_source_enable_decibel_volume(s, true);
567 s->soft_volume = s->reference_volume;
570 /* If the source implementor supports dB volumes by itself, we should always
571 * try to enable flat volumes too */
572 if ((s->flags & PA_SOURCE_DECIBEL_VOLUME))
573 enable_flat_volume(s, true);
575 if (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) {
576 pa_source *root_source = pa_source_get_master(s);
578 pa_assert(PA_LIKELY(root_source));
580 s->reference_volume = root_source->reference_volume;
581 pa_cvolume_remap(&s->reference_volume, &root_source->channel_map, &s->channel_map);
583 s->real_volume = root_source->real_volume;
584 pa_cvolume_remap(&s->real_volume, &root_source->channel_map, &s->channel_map);
586 /* We assume that if the source implementor changed the default
587 * volume he did so in real_volume, because that is the usual
588 * place where he is supposed to place his changes. */
589 s->reference_volume = s->real_volume;
591 s->thread_info.soft_volume = s->soft_volume;
592 s->thread_info.soft_muted = s->muted;
593 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
595 pa_assert((s->flags & PA_SOURCE_HW_VOLUME_CTRL)
596 || (s->base_volume == PA_VOLUME_NORM
597 && ((s->flags & PA_SOURCE_DECIBEL_VOLUME || (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)))));
598 pa_assert(!(s->flags & PA_SOURCE_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
599 pa_assert(!(s->flags & PA_SOURCE_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
601 if (s->suspend_cause)
602 pa_assert_se(source_set_state(s, PA_SOURCE_SUSPENDED) == 0);
604 pa_assert_se(source_set_state(s, PA_SOURCE_IDLE) == 0);
606 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_NEW, s->index);
607 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PUT], s);
609 /* This function must be called after the PA_CORE_HOOK_SOURCE_PUT hook,
610 * because module-switch-on-connect needs to know the old default source */
611 pa_core_update_default_source(s->core);
614 /* Called from main context */
615 void pa_source_unlink(pa_source *s) {
617 pa_source_output *o, PA_UNUSED *j = NULL;
619 pa_source_assert_ref(s);
620 pa_assert_ctl_context();
622 /* See pa_sink_unlink() for a couple of comments on how this function works */
625 if (s->unlink_requested)
628 s->unlink_requested = true;
630 linked = PA_SOURCE_IS_LINKED(s->state);
633 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], s);
635 if (s->state != PA_SOURCE_UNLINKED)
636 pa_namereg_unregister(s->core, s->name);
637 pa_idxset_remove_by_data(s->core->sources, s, NULL);
639 pa_core_update_default_source(s->core);
642 pa_idxset_remove_by_data(s->card->sources, s, NULL);
644 while ((o = pa_idxset_first(s->outputs, NULL))) {
646 pa_source_output_kill(o);
651 source_set_state(s, PA_SOURCE_UNLINKED);
653 s->state = PA_SOURCE_UNLINKED;
658 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
659 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK_POST], s);
663 /* Called from main context */
664 static void source_free(pa_object *o) {
665 pa_source *s = PA_SOURCE(o);
668 pa_assert_ctl_context();
669 pa_assert(pa_source_refcnt(s) == 0);
670 pa_assert(!PA_SOURCE_IS_LINKED(s->state));
672 pa_log_info("Freeing source %u \"%s\"", s->index, s->name);
674 pa_source_volume_change_flush(s);
676 pa_idxset_free(s->outputs, NULL);
677 pa_hashmap_free(s->thread_info.outputs);
679 if (s->silence.memblock)
680 pa_memblock_unref(s->silence.memblock);
686 pa_proplist_free(s->proplist);
689 pa_hashmap_free(s->ports);
694 /* Called from main context, and not while the IO thread is active, please */
695 void pa_source_set_asyncmsgq(pa_source *s, pa_asyncmsgq *q) {
696 pa_source_assert_ref(s);
697 pa_assert_ctl_context();
702 /* Called from main context, and not while the IO thread is active, please */
703 void pa_source_update_flags(pa_source *s, pa_source_flags_t mask, pa_source_flags_t value) {
704 pa_source_flags_t old_flags;
705 pa_source_output *output;
708 pa_source_assert_ref(s);
709 pa_assert_ctl_context();
711 /* For now, allow only a minimal set of flags to be changed. */
712 pa_assert((mask & ~(PA_SOURCE_DYNAMIC_LATENCY|PA_SOURCE_LATENCY)) == 0);
714 old_flags = s->flags;
715 s->flags = (s->flags & ~mask) | (value & mask);
717 if (s->flags == old_flags)
720 if ((s->flags & PA_SOURCE_LATENCY) != (old_flags & PA_SOURCE_LATENCY))
721 pa_log_debug("Source %s: LATENCY flag %s.", s->name, (s->flags & PA_SOURCE_LATENCY) ? "enabled" : "disabled");
723 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY) != (old_flags & PA_SOURCE_DYNAMIC_LATENCY))
724 pa_log_debug("Source %s: DYNAMIC_LATENCY flag %s.",
725 s->name, (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ? "enabled" : "disabled");
727 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
728 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_FLAGS_CHANGED], s);
730 PA_IDXSET_FOREACH(output, s->outputs, idx) {
731 if (output->destination_source)
732 pa_source_update_flags(output->destination_source, mask, value);
736 /* Called from IO context, or before _put() from main context */
737 void pa_source_set_rtpoll(pa_source *s, pa_rtpoll *p) {
738 pa_source_assert_ref(s);
739 pa_source_assert_io_context(s);
741 s->thread_info.rtpoll = p;
744 /* Called from main context */
745 int pa_source_update_status(pa_source*s) {
746 pa_source_assert_ref(s);
747 pa_assert_ctl_context();
748 pa_assert(PA_SOURCE_IS_LINKED(s->state));
750 if (s->state == PA_SOURCE_SUSPENDED)
753 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
756 /* Called from any context - must be threadsafe */
757 void pa_source_set_mixer_dirty(pa_source *s, bool is_dirty) {
758 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
761 /* Called from main context */
762 int pa_source_suspend(pa_source *s, bool suspend, pa_suspend_cause_t cause) {
763 pa_source_assert_ref(s);
764 pa_assert_ctl_context();
765 pa_assert(PA_SOURCE_IS_LINKED(s->state));
766 pa_assert(cause != 0);
768 if (s->monitor_of && cause != PA_SUSPEND_PASSTHROUGH)
769 return -PA_ERR_NOTSUPPORTED;
772 s->suspend_cause |= cause;
774 s->suspend_cause &= ~cause;
776 if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
777 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
778 it'll be handled just fine. */
779 pa_source_set_mixer_dirty(s, false);
780 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
781 if (s->active_port && s->set_port) {
782 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
783 struct source_message_set_port msg = { .port = s->active_port, .ret = 0 };
784 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
787 s->set_port(s, s->active_port);
797 if ((pa_source_get_state(s) == PA_SOURCE_SUSPENDED) == !!s->suspend_cause)
800 pa_log_debug("Suspend cause of source %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
802 if (s->suspend_cause)
803 return source_set_state(s, PA_SOURCE_SUSPENDED);
805 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
808 /* Called from main context */
809 int pa_source_sync_suspend(pa_source *s) {
810 pa_sink_state_t state;
812 pa_source_assert_ref(s);
813 pa_assert_ctl_context();
814 pa_assert(PA_SOURCE_IS_LINKED(s->state));
815 pa_assert(s->monitor_of);
817 state = pa_sink_get_state(s->monitor_of);
819 if (state == PA_SINK_SUSPENDED)
820 return source_set_state(s, PA_SOURCE_SUSPENDED);
822 pa_assert(PA_SINK_IS_OPENED(state));
824 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
827 /* Called from main context */
828 pa_queue *pa_source_move_all_start(pa_source *s, pa_queue *q) {
829 pa_source_output *o, *n;
832 pa_source_assert_ref(s);
833 pa_assert_ctl_context();
834 pa_assert(PA_SOURCE_IS_LINKED(s->state));
839 for (o = PA_SOURCE_OUTPUT(pa_idxset_first(s->outputs, &idx)); o; o = n) {
840 n = PA_SOURCE_OUTPUT(pa_idxset_next(s->outputs, &idx));
842 pa_source_output_ref(o);
844 if (pa_source_output_start_move(o) >= 0)
847 pa_source_output_unref(o);
853 /* Called from main context */
854 void pa_source_move_all_finish(pa_source *s, pa_queue *q, bool save) {
857 pa_source_assert_ref(s);
858 pa_assert_ctl_context();
859 pa_assert(PA_SOURCE_IS_LINKED(s->state));
862 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
863 if (pa_source_output_finish_move(o, s, save) < 0)
864 pa_source_output_fail_move(o);
866 pa_source_output_unref(o);
869 pa_queue_free(q, NULL);
872 /* Called from main context */
873 void pa_source_move_all_fail(pa_queue *q) {
876 pa_assert_ctl_context();
879 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
880 pa_source_output_fail_move(o);
881 pa_source_output_unref(o);
884 pa_queue_free(q, NULL);
887 /* Called from IO thread context */
888 void pa_source_process_rewind(pa_source *s, size_t nbytes) {
892 pa_source_assert_ref(s);
893 pa_source_assert_io_context(s);
894 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
899 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
902 pa_log_debug("Processing rewind...");
904 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
905 pa_source_output_assert_ref(o);
906 pa_source_output_process_rewind(o, nbytes);
910 /* Called from IO thread context */
911 void pa_source_post(pa_source*s, const pa_memchunk *chunk) {
915 pa_source_assert_ref(s);
916 pa_source_assert_io_context(s);
917 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
920 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
923 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
924 pa_memchunk vchunk = *chunk;
926 pa_memblock_ref(vchunk.memblock);
927 pa_memchunk_make_writable(&vchunk, 0);
929 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
930 pa_silence_memchunk(&vchunk, &s->sample_spec);
932 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
934 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
935 pa_source_output_assert_ref(o);
937 if (!o->thread_info.direct_on_input)
938 pa_source_output_push(o, &vchunk);
941 pa_memblock_unref(vchunk.memblock);
944 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
945 pa_source_output_assert_ref(o);
947 if (!o->thread_info.direct_on_input)
948 pa_source_output_push(o, chunk);
953 /* Called from IO thread context */
954 void pa_source_post_direct(pa_source*s, pa_source_output *o, const pa_memchunk *chunk) {
955 pa_source_assert_ref(s);
956 pa_source_assert_io_context(s);
957 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
958 pa_source_output_assert_ref(o);
959 pa_assert(o->thread_info.direct_on_input);
962 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
965 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
966 pa_memchunk vchunk = *chunk;
968 pa_memblock_ref(vchunk.memblock);
969 pa_memchunk_make_writable(&vchunk, 0);
971 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
972 pa_silence_memchunk(&vchunk, &s->sample_spec);
974 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
976 pa_source_output_push(o, &vchunk);
978 pa_memblock_unref(vchunk.memblock);
980 pa_source_output_push(o, chunk);
983 /* Called from main thread */
984 int pa_source_reconfigure(pa_source *s, pa_sample_spec *spec, bool passthrough) {
986 pa_sample_spec desired_spec;
987 uint32_t default_rate = s->default_sample_rate;
988 uint32_t alternate_rate = s->alternate_sample_rate;
989 bool default_rate_is_usable = false;
990 bool alternate_rate_is_usable = false;
991 bool avoid_resampling = s->core->avoid_resampling;
993 /* We currently only try to reconfigure the sample rate */
995 if (pa_sample_spec_equal(spec, &s->sample_spec))
998 if (!s->reconfigure && !s->monitor_of)
1001 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1002 pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
1006 if (PA_SOURCE_IS_RUNNING(s->state)) {
1007 pa_log_info("Cannot update rate, SOURCE_IS_RUNNING, will keep using %u Hz",
1008 s->sample_spec.rate);
1012 if (s->monitor_of) {
1013 if (PA_SINK_IS_RUNNING(s->monitor_of->state)) {
1014 pa_log_info("Cannot update rate, this is a monitor source and the sink is running.");
1019 if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
1022 desired_spec = s->sample_spec;
1025 /* We have to try to use the source output rate */
1026 desired_spec.rate = spec->rate;
1028 } else if (avoid_resampling && (spec->rate >= default_rate || spec->rate >= alternate_rate)) {
1029 /* We just try to set the source output's sample rate if it's not too low */
1030 desired_spec.rate = spec->rate;
1032 } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
1033 /* We can directly try to use this rate */
1034 desired_spec.rate = spec->rate;
1037 /* See if we can pick a rate that results in less resampling effort */
1038 if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
1039 default_rate_is_usable = true;
1040 if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
1041 default_rate_is_usable = true;
1042 if (alternate_rate && alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
1043 alternate_rate_is_usable = true;
1044 if (alternate_rate && alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
1045 alternate_rate_is_usable = true;
1047 if (alternate_rate_is_usable && !default_rate_is_usable)
1048 desired_spec.rate = alternate_rate;
1050 desired_spec.rate = default_rate;
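/* A worked example of the heuristic above, assuming a default rate of
 * 48000 Hz and an alternate rate of 44100 Hz: a 22050 Hz stream shares the
 * 11025 Hz base with 44100, so the alternate rate is chosen; a 32000 Hz
 * stream shares the 4000 Hz base with 48000, so the default rate is kept. */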
1053 if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_source_is_passthrough(s))
1056 if (!passthrough && pa_source_used_by(s) > 0)
1059 pa_log_debug("Suspending source %s due to changing the sample rate.", s->name);
1060 pa_source_suspend(s, true, PA_SUSPEND_INTERNAL);
1063 ret = s->reconfigure(s, &desired_spec, passthrough);
1065 /* This is a monitor source. */
1067 /* XXX: This code is written with non-passthrough streams in mind. I
1068 * have no idea whether the behaviour with passthrough streams is
1071 pa_sample_spec old_spec = s->sample_spec;
1073 s->sample_spec = desired_spec;
1074 ret = pa_sink_reconfigure(s->monitor_of, &desired_spec, false);
1077 /* Changing the sink rate failed, roll back the old rate for
1078 * the monitor source. Why did we set the source rate before
1079 * calling pa_sink_reconfigure(), you may ask. The reason is
1080 * that pa_sink_reconfigure() tries to update the monitor
1081 * source rate, but we are already in the process of updating
1082 * the monitor source rate, so there's a risk of entering an
1083 * infinite loop. Setting the source rate before calling
1084 * pa_sink_reconfigure() makes the rate == s->sample_spec.rate
1085 * check in the beginning of this function return early, so we avoid the infinite loop. */
1087 s->sample_spec = old_spec;
1095 pa_source_output *o;
1097 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1098 if (o->state == PA_SOURCE_OUTPUT_CORKED)
1099 pa_source_output_update_rate(o);
1102 pa_log_info("Changed sampling rate successfully");
1105 pa_source_suspend(s, false, PA_SUSPEND_INTERNAL);
1110 /* Called from main thread */
1111 pa_usec_t pa_source_get_latency(pa_source *s) {
1114 pa_source_assert_ref(s);
1115 pa_assert_ctl_context();
1116 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1118 if (s->state == PA_SOURCE_SUSPENDED)
1121 if (!(s->flags & PA_SOURCE_LATENCY))
1124 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1126 /* The return value is unsigned, so check that the offset can be added to usec without underflowing. */
1128 if (-s->port_latency_offset <= usec)
1129 usec += s->port_latency_offset;
1133 return (pa_usec_t)usec;
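/* Example of the guard above: with a driver-reported latency of 2000 us and a
 * port latency offset of -5000 us the sum would be negative, which the
 * unsigned return type cannot represent, so the offset is not added. */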
1136 /* Called from IO thread */
1137 int64_t pa_source_get_latency_within_thread(pa_source *s, bool allow_negative) {
1141 pa_source_assert_ref(s);
1142 pa_source_assert_io_context(s);
1143 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
1145 /* The returned value is supposed to be in the time domain of the sound card! */
1147 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
1150 if (!(s->flags & PA_SOURCE_LATENCY))
1153 o = PA_MSGOBJECT(s);
1155 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1157 o->process_msg(o, PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1159 /* If allow_negative is false, the call should only return positive values, */
1160 usec += s->thread_info.port_latency_offset;
1161 if (!allow_negative && usec < 0)
1167 /* Called from the main thread (and also from the IO thread while the main
1168 * thread is waiting).
1170 * When a source uses volume sharing, it never has the PA_SOURCE_FLAT_VOLUME flag
1171 * set. Instead, flat volume mode is detected by checking whether the root source
1172 * has the flag set. */
1173 bool pa_source_flat_volume_enabled(pa_source *s) {
1174 pa_source_assert_ref(s);
1176 s = pa_source_get_master(s);
1179 return (s->flags & PA_SOURCE_FLAT_VOLUME);
1184 /* Called from the main thread (and also from the IO thread while the main
1185 * thread is waiting). */
1186 pa_source *pa_source_get_master(pa_source *s) {
1187 pa_source_assert_ref(s);
1189 while (s && (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1190 if (PA_UNLIKELY(!s->output_from_master))
1193 s = s->output_from_master->source;
1199 /* Called from main context */
1200 bool pa_source_is_filter(pa_source *s) {
1201 pa_source_assert_ref(s);
1203 return (s->output_from_master != NULL);
1206 /* Called from main context */
1207 bool pa_source_is_passthrough(pa_source *s) {
1209 pa_source_assert_ref(s);
1211 /* NB Currently only monitor sources support passthrough mode */
1212 return (s->monitor_of && pa_sink_is_passthrough(s->monitor_of));
1215 /* Called from main context */
1216 void pa_source_enter_passthrough(pa_source *s) {
1219 /* set the volume to NORM */
1220 s->saved_volume = *pa_source_get_volume(s, true);
1221 s->saved_save_volume = s->save_volume;
1223 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1224 pa_source_set_volume(s, &volume, true, false);
1227 /* Called from main context */
1228 void pa_source_leave_passthrough(pa_source *s) {
1229 /* Restore source volume to what it was before we entered passthrough mode */
1230 pa_source_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
1232 pa_cvolume_init(&s->saved_volume);
1233 s->saved_save_volume = false;
1236 /* Called from main context. */
1237 static void compute_reference_ratio(pa_source_output *o) {
1239 pa_cvolume remapped;
1243 pa_assert(pa_source_flat_volume_enabled(o->source));
1246 /* Calculates the reference ratio from the source's reference
1247 * volume. This basically calculates:
1249 * o->reference_ratio = o->volume / o->source->reference_volume */
1252 remapped = o->source->reference_volume;
1253 pa_cvolume_remap(&remapped, &o->source->channel_map, &o->channel_map);
1255 ratio = o->reference_ratio;
1257 for (c = 0; c < o->sample_spec.channels; c++) {
1259 /* We don't update when the source volume is 0 anyway */
1260 if (remapped.values[c] <= PA_VOLUME_MUTED)
1263 /* Don't update the reference ratio unless necessary */
1264 if (pa_sw_volume_multiply(
1266 remapped.values[c]) == o->volume.values[c])
1269 ratio.values[c] = pa_sw_volume_divide(
1270 o->volume.values[c],
1271 remapped.values[c]);
1274 pa_source_output_set_reference_ratio(o, &ratio);
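/* Worked example of the ratio above: with a source reference volume of 100%
 * (PA_VOLUME_NORM) and an output volume of 50%, the reference ratio becomes
 * 50%. If the source reference volume is later raised to 80%,
 * propagate_reference_volume() below recomputes the output volume as
 * 80% * 50% = 40% (pa_sw_volume_multiply() combines gains, so in effect
 * percentages of PA_VOLUME_NORM simply multiply), keeping the output's
 * position relative to the source. */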
1277 /* Called from main context. Only called for the root source in volume sharing
1278 * cases, except for internal recursive calls. */
1279 static void compute_reference_ratios(pa_source *s) {
1281 pa_source_output *o;
1283 pa_source_assert_ref(s);
1284 pa_assert_ctl_context();
1285 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1286 pa_assert(pa_source_flat_volume_enabled(s));
1288 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1289 compute_reference_ratio(o);
1291 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
1292 && PA_SOURCE_IS_LINKED(o->destination_source->state))
1293 compute_reference_ratios(o->destination_source);
1297 /* Called from main context. Only called for the root source in volume sharing
1298 * cases, except for internal recursive calls. */
1299 static void compute_real_ratios(pa_source *s) {
1300 pa_source_output *o;
1303 pa_source_assert_ref(s);
1304 pa_assert_ctl_context();
1305 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1306 pa_assert(pa_source_flat_volume_enabled(s));
1308 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1310 pa_cvolume remapped;
1312 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1313 /* The origin source uses volume sharing, so this output's real ratio
1314 * is handled as a special case - the real ratio must be 0 dB, and
1315 * as a result o->soft_volume must equal o->volume_factor. */
1316 pa_cvolume_reset(&o->real_ratio, o->real_ratio.channels);
1317 o->soft_volume = o->volume_factor;
1319 if (PA_SOURCE_IS_LINKED(o->destination_source->state))
1320 compute_real_ratios(o->destination_source);
1326 /* This basically calculates:
1328 * o->real_ratio := o->volume / s->real_volume
1329 * o->soft_volume := o->real_ratio * o->volume_factor */
1332 remapped = s->real_volume;
1333 pa_cvolume_remap(&remapped, &s->channel_map, &o->channel_map);
1335 o->real_ratio.channels = o->sample_spec.channels;
1336 o->soft_volume.channels = o->sample_spec.channels;
1338 for (c = 0; c < o->sample_spec.channels; c++) {
1340 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1341 /* We leave o->real_ratio untouched */
1342 o->soft_volume.values[c] = PA_VOLUME_MUTED;
1346 /* Don't lose accuracy unless necessary */
1347 if (pa_sw_volume_multiply(
1348 o->real_ratio.values[c],
1349 remapped.values[c]) != o->volume.values[c])
1351 o->real_ratio.values[c] = pa_sw_volume_divide(
1352 o->volume.values[c],
1353 remapped.values[c]);
1355 o->soft_volume.values[c] = pa_sw_volume_multiply(
1356 o->real_ratio.values[c],
1357 o->volume_factor.values[c]);
1360 /* We don't copy the soft_volume to the thread_info data
1361 * here. That must be done by the caller */
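/* Continuing that example in flat volume mode: if s->real_volume ends up at
 * 80% and an output's volume is 40%, the output's real_ratio is
 * 40% / 80% = 50%, and with a neutral volume_factor its soft_volume is 50% as
 * well, which is the attenuation later applied in software to the data
 * delivered to that output. */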
1365 static pa_cvolume *cvolume_remap_minimal_impact(
1367 const pa_cvolume *template,
1368 const pa_channel_map *from,
1369 const pa_channel_map *to) {
1374 pa_assert(template);
1377 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1378 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1380 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1381 * mapping from source output to source volumes:
1383 * If template is a possible remapping from v it is used instead
1384 * of remapping anew.
1386 * If the channel maps don't match we set an all-channel volume on
1387 * the source to ensure that changing a volume on one stream has no
1388 * effect that cannot be compensated for in another stream that
1389 * does not have the same channel map as the source. */
1391 if (pa_channel_map_equal(from, to))
1395 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1400 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
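/* For example, when a stereo output volume has to be projected onto a 5.1
 * source and the template does not turn out to be an equivalent remapping,
 * all six source channels are simply set to the stereo volume's maximum, so
 * no per-channel detail is invented that another stream could not compensate
 * for. */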
1404 /* Called from main thread. Only called for the root source in volume sharing
1405 * cases, except for internal recursive calls. */
1406 static void get_maximum_output_volume(pa_source *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1407 pa_source_output *o;
1410 pa_source_assert_ref(s);
1411 pa_assert(max_volume);
1412 pa_assert(channel_map);
1413 pa_assert(pa_source_flat_volume_enabled(s));
1415 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1416 pa_cvolume remapped;
1418 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1419 if (PA_SOURCE_IS_LINKED(o->destination_source->state))
1420 get_maximum_output_volume(o->destination_source, max_volume, channel_map);
1422 /* Ignore this output. The origin source uses volume sharing, so this
1423 * output's volume will be set to be equal to the root source's real
1424 * volume. Obviously this output's current volume must not then
1425 * affect what the root source's real volume will be. */
1429 remapped = o->volume;
1430 cvolume_remap_minimal_impact(&remapped, max_volume, &o->channel_map, channel_map);
1431 pa_cvolume_merge(max_volume, max_volume, &remapped);
1435 /* Called from main thread. Only called for the root source in volume sharing
1436 * cases, except for internal recursive calls. */
1437 static bool has_outputs(pa_source *s) {
1438 pa_source_output *o;
1441 pa_source_assert_ref(s);
1443 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1444 if (!o->destination_source || !(o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || has_outputs(o->destination_source))
1451 /* Called from main thread. Only called for the root source in volume sharing
1452 * cases, except for internal recursive calls. */
1453 static void update_real_volume(pa_source *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1454 pa_source_output *o;
1457 pa_source_assert_ref(s);
1458 pa_assert(new_volume);
1459 pa_assert(channel_map);
1461 s->real_volume = *new_volume;
1462 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1464 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1465 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1466 if (pa_source_flat_volume_enabled(s)) {
1467 pa_cvolume new_output_volume;
1469 /* Follow the root source's real volume. */
1470 new_output_volume = *new_volume;
1471 pa_cvolume_remap(&new_output_volume, channel_map, &o->channel_map);
1472 pa_source_output_set_volume_direct(o, &new_output_volume);
1473 compute_reference_ratio(o);
1476 if (PA_SOURCE_IS_LINKED(o->destination_source->state))
1477 update_real_volume(o->destination_source, new_volume, channel_map);
1482 /* Called from main thread. Only called for the root source in shared volume cases, except for internal recursive calls. */
1484 static void compute_real_volume(pa_source *s) {
1485 pa_source_assert_ref(s);
1486 pa_assert_ctl_context();
1487 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1488 pa_assert(pa_source_flat_volume_enabled(s));
1489 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1491 /* This determines the maximum volume of all streams and sets
1492 * s->real_volume accordingly. */
1494 if (!has_outputs(s)) {
1495 /* In the special case that we have no source outputs we leave the
1496 * volume unmodified. */
1497 update_real_volume(s, &s->reference_volume, &s->channel_map);
1501 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1503 /* First let's determine the new maximum volume of all outputs
1504 * connected to this source */
1505 get_maximum_output_volume(s, &s->real_volume, &s->channel_map);
1506 update_real_volume(s, &s->real_volume, &s->channel_map);
1508 /* Then, let's update the real ratios/soft volumes of all outputs
1509 * connected to this source */
1510 compute_real_ratios(s);
1513 /* Called from main thread. Only called for the root source in shared volume
1514 * cases, except for internal recursive calls. */
1515 static void propagate_reference_volume(pa_source *s) {
1516 pa_source_output *o;
1519 pa_source_assert_ref(s);
1520 pa_assert_ctl_context();
1521 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1522 pa_assert(pa_source_flat_volume_enabled(s));
1524 /* This is called whenever the source volume changes for a reason other
1525 * than a source output volume change. We need to fix up the
1526 * source output volumes accordingly */
1528 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1529 pa_cvolume new_volume;
1531 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1532 if (PA_SOURCE_IS_LINKED(o->destination_source->state))
1533 propagate_reference_volume(o->destination_source);
1535 /* Since the origin source uses volume sharing, this output's volume
1536 * needs to be updated to match the root source's real volume, but
1537 * that will be done later in update_real_volume(). */
1541 /* This basically calculates:
1543 * o->volume := s->reference_volume * o->reference_ratio */
1545 new_volume = s->reference_volume;
1546 pa_cvolume_remap(&new_volume, &s->channel_map, &o->channel_map);
1547 pa_sw_cvolume_multiply(&new_volume, &new_volume, &o->reference_ratio);
1548 pa_source_output_set_volume_direct(o, &new_volume);
1552 /* Called from main thread. Only called for the root source in volume sharing
1553 * cases, except for internal recursive calls. The return value indicates
1554 * whether any reference volume actually changed. */
1555 static bool update_reference_volume(pa_source *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
1557 bool reference_volume_changed;
1558 pa_source_output *o;
1561 pa_source_assert_ref(s);
1562 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1564 pa_assert(channel_map);
1565 pa_assert(pa_cvolume_valid(v));
1568 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
1570 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
1571 pa_source_set_reference_volume_direct(s, &volume);
1573 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
1575 if (!reference_volume_changed && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1576 /* If the root source's volume doesn't change, then there can't be any
1577 * changes in the other source in the source tree either.
1579 * It's probably theoretically possible that even if the root source's
1580 * volume changes slightly, some filter source doesn't change its volume
1581 * due to rounding errors. If that happens, we still want to propagate
1582 * the changed root source volume to the sources connected to the
1583 * intermediate source that didn't change its volume. This theoretical
1584 * possibility is the reason why we have that !(s->flags &
1585 * PA_SOURCE_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
1586 * notice even if we always returned false here when
1587 * reference_volume_changed is false. */
1590 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1591 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
1592 && PA_SOURCE_IS_LINKED(o->destination_source->state))
1593 update_reference_volume(o->destination_source, v, channel_map, false);
1599 /* Called from main thread */
1600 void pa_source_set_volume(
1602 const pa_cvolume *volume,
1606 pa_cvolume new_reference_volume, root_real_volume;
1607 pa_source *root_source;
1609 pa_source_assert_ref(s);
1610 pa_assert_ctl_context();
1611 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1612 pa_assert(!volume || pa_cvolume_valid(volume));
1613 pa_assert(volume || pa_source_flat_volume_enabled(s));
1614 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1616 /* make sure we don't change the volume in PASSTHROUGH mode ...
1617 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
1618 if (pa_source_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
1619 pa_log_warn("Cannot change volume, source is monitor of a PASSTHROUGH sink");
1623 /* In case of volume sharing, the volume is set for the root source first,
1624 * from which it's then propagated to the sharing sources. */
1625 root_source = pa_source_get_master(s);
1627 if (PA_UNLIKELY(!root_source))
1630 /* As a special exception we accept mono volumes on all sources --
1631 * even on those with more complex channel maps */
1634 if (pa_cvolume_compatible(volume, &s->sample_spec))
1635 new_reference_volume = *volume;
1637 new_reference_volume = s->reference_volume;
1638 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
1641 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
1643 if (update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save)) {
1644 if (pa_source_flat_volume_enabled(root_source)) {
1645 /* OK, propagate this volume change back to the outputs */
1646 propagate_reference_volume(root_source);
1648 /* And now recalculate the real volume */
1649 compute_real_volume(root_source);
1651 update_real_volume(root_source, &root_source->reference_volume, &root_source->channel_map);
1655 /* If volume is NULL we synchronize the source's real and
1656 * reference volumes with the stream volumes. */
1658 pa_assert(pa_source_flat_volume_enabled(root_source));
1660 /* Ok, let's determine the new real volume */
1661 compute_real_volume(root_source);
1663 /* To propagate the reference volume from the filter to the root source,
1664 * we first take the real volume from the root source and remap it to
1665 * match the filter. Then, we merge in the reference volume from the
1666 * filter on top of this, and remap it back to the root source channel count and map. */
1668 root_real_volume = root_source->real_volume;
1669 /* First we remap root's real volume to filter channel count and map if needed */
1670 if (s != root_source && !pa_channel_map_equal(&s->channel_map, &root_source->channel_map))
1671 pa_cvolume_remap(&root_real_volume, &root_source->channel_map, &s->channel_map);
1672 /* Then let's 'push' the reference volume if necessary */
1673 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_real_volume);
1674 /* If the source and its root don't have the same number of channels, we need to remap back */
1675 if (s != root_source && !pa_channel_map_equal(&s->channel_map, &root_source->channel_map))
1676 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
1678 update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save);
1680 /* Now that the reference volume is updated, we can update the streams'
1681 * reference ratios. */
1682 compute_reference_ratios(root_source);
1685 if (root_source->set_volume) {
1686 /* If we have a function set_volume(), then we do not apply a
1687 * soft volume by default. However, set_volume() is free to
1688 * apply one to root_source->soft_volume */
1690 pa_cvolume_reset(&root_source->soft_volume, root_source->sample_spec.channels);
1691 if (!(root_source->flags & PA_SOURCE_DEFERRED_VOLUME))
1692 root_source->set_volume(root_source);
1695 /* If we have no function set_volume(), then the soft volume
1696 * becomes the real volume */
1697 root_source->soft_volume = root_source->real_volume;
1699 /* This tells the source that soft volume and/or real volume changed */
1701 pa_assert_se(pa_asyncmsgq_send(root_source->asyncmsgq, PA_MSGOBJECT(root_source), PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
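/* Summarizing the flow above for the common case: pa_source_set_volume()
 * always resolves the root source first and updates the (remapped) reference
 * volume there; with flat volume enabled the change is propagated back down to
 * the outputs (propagate_reference_volume() plus compute_real_volume()), and
 * the result then reaches the hardware either through set_volume() (or, with
 * deferred volume, from the IO thread) or, lacking a set_volume() callback,
 * purely as a soft volume. */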
1704 /* Called from the io thread if sync volume is used, otherwise from the main thread.
1705 * Only to be called by source implementor */
1706 void pa_source_set_soft_volume(pa_source *s, const pa_cvolume *volume) {
1708 pa_source_assert_ref(s);
1709 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1711 if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
1712 pa_source_assert_io_context(s);
1714 pa_assert_ctl_context();
1717 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1719 s->soft_volume = *volume;
1721 if (PA_SOURCE_IS_LINKED(s->state) && !(s->flags & PA_SOURCE_DEFERRED_VOLUME))
1722 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1724 s->thread_info.soft_volume = s->soft_volume;
1727 /* Called from the main thread. Only called for the root source in volume sharing
1728 * cases, except for internal recursive calls. */
1729 static void propagate_real_volume(pa_source *s, const pa_cvolume *old_real_volume) {
1730 pa_source_output *o;
1733 pa_source_assert_ref(s);
1734 pa_assert(old_real_volume);
1735 pa_assert_ctl_context();
1736 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1738 /* This is called when the hardware's real volume changes due to
1739 * some external event. We copy the real volume into our
1740 * reference volume and then rebuild the stream volumes based on
1741 * o->real_ratio which should stay fixed. */
1743 if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1744 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1747 /* 1. Make the real volume the reference volume */
1748 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
1751 if (pa_source_flat_volume_enabled(s)) {
1752 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1753 pa_cvolume new_volume;
1755 /* 2. Since the source's reference and real volumes are equal
1756 * now our ratios should be too. */
1757 pa_source_output_set_reference_ratio(o, &o->real_ratio);
1759 /* 3. Recalculate the new stream reference volume based on the
1760 * reference ratio and the source's reference volume.
1762 * This basically calculates:
1764 * o->volume = s->reference_volume * o->reference_ratio
1766 * This is identical to propagate_reference_volume() */
1767 new_volume = s->reference_volume;
1768 pa_cvolume_remap(&new_volume, &s->channel_map, &o->channel_map);
1769 pa_sw_cvolume_multiply(&new_volume, &new_volume, &o->reference_ratio);
1770 pa_source_output_set_volume_direct(o, &new_volume);
1772 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
1773 && PA_SOURCE_IS_LINKED(o->destination_source->state))
1774 propagate_real_volume(o->destination_source, old_real_volume);
1778 /* Something got changed in the hardware. It probably makes sense
1779 * to save changed hw settings given that hw volume changes not
1780 * triggered by PA are almost certainly done by the user. */
1781 if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1782 s->save_volume = true;
1785 /* Called from io thread */
1786 void pa_source_update_volume_and_mute(pa_source *s) {
1788 pa_source_assert_io_context(s);
1790 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
1793 /* Called from main thread */
1794 const pa_cvolume *pa_source_get_volume(pa_source *s, bool force_refresh) {
1795 pa_source_assert_ref(s);
1796 pa_assert_ctl_context();
1797 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1799 if (s->refresh_volume || force_refresh) {
1800 struct pa_cvolume old_real_volume;
1802 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1804 old_real_volume = s->real_volume;
1806 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume)
1809 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1811 update_real_volume(s, &s->real_volume, &s->channel_map);
1812 propagate_real_volume(s, &old_real_volume);
1815 return &s->reference_volume;
1818 /* Called from main thread. In volume sharing cases, only the root source may call this function. */
1820 void pa_source_volume_changed(pa_source *s, const pa_cvolume *new_real_volume) {
1821 pa_cvolume old_real_volume;
1823 pa_source_assert_ref(s);
1824 pa_assert_ctl_context();
1825 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1826 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1828 /* The source implementor may call this if the volume changed to make sure everyone is notified */
1830 old_real_volume = s->real_volume;
1831 update_real_volume(s, new_real_volume, &s->channel_map);
1832 propagate_real_volume(s, &old_real_volume);
1835 /* Called from main thread */
1836 void pa_source_set_mute(pa_source *s, bool mute, bool save) {
1839 pa_source_assert_ref(s);
1840 pa_assert_ctl_context();
1842 old_muted = s->muted;
1844 if (mute == old_muted) {
1845 s->save_muted |= save;
1850 s->save_muted = save;
1852 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->set_mute) {
1853 s->set_mute_in_progress = true;
1855 s->set_mute_in_progress = false;
1858 if (!PA_SOURCE_IS_LINKED(s->state))
1861 pa_log_debug("The mute of source %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
1862 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1863 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1864 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_MUTE_CHANGED], s);
1867 /* Called from main thread */
1868 bool pa_source_get_mute(pa_source *s, bool force_refresh) {
1870 pa_source_assert_ref(s);
1871 pa_assert_ctl_context();
1872 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1874 if ((s->refresh_muted || force_refresh) && s->get_mute) {
1877 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1878 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
1879 pa_source_mute_changed(s, mute);
1881 if (s->get_mute(s, &mute) >= 0)
1882 pa_source_mute_changed(s, mute);
/* Called from main thread */
void pa_source_mute_changed(pa_source *s, bool new_muted) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if (s->set_mute_in_progress)
        return;

    /* pa_source_set_mute() does this same check, so this may appear redundant,
     * but we must have this here also, because the save parameter of
     * pa_source_set_mute() would otherwise have unintended side effects
     * (saving the mute state when it shouldn't be saved). */
    if (new_muted == s->muted)
        return;

    pa_source_set_mute(s, new_muted, true);
}
/* Called from main thread */
bool pa_source_update_proplist(pa_source *s, pa_update_mode_t mode, pa_proplist *p) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    if (p)
        pa_proplist_update(s->proplist, mode, p);

    if (PA_SOURCE_IS_LINKED(s->state)) {
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    return true;
}
/* Called from main thread */
/* FIXME -- this should be dropped and be merged into pa_source_update_proplist() */
void pa_source_set_description(pa_source *s, const char *description) {
    const char *old;
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
        return;

    old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    if (old && description && pa_streq(old, description))
        return;

    if (description)
        pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
    else
        pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    if (PA_SOURCE_IS_LINKED(s->state)) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
    }
}
/* Called from main thread */
unsigned pa_source_linked_by(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    return pa_idxset_size(s->outputs);
}
/* Called from main thread */
unsigned pa_source_used_by(pa_source *s) {
    unsigned ret;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    ret = pa_idxset_size(s->outputs);
    pa_assert(ret >= s->n_corked);

    return ret - s->n_corked;
}
/* Called from main thread */
unsigned pa_source_check_suspend(pa_source *s, pa_source_output *ignore) {
    unsigned ret;
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    if (!PA_SOURCE_IS_LINKED(s->state))
        return 0;

    ret = 0;

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_source_output_state_t st;

        if (o == ignore)
            continue;

        st = pa_source_output_get_state(o);

        /* We do not assert here. It is perfectly valid for a source output to
         * be in the INIT state (i.e. created, marked done but not yet put)
         * and we should not care if it's unlinked as it won't contribute
         * towards our busy status. */
        if (!PA_SOURCE_OUTPUT_IS_LINKED(st))
            continue;
        if (st == PA_SOURCE_OUTPUT_CORKED)
            continue;
        if (o->flags & PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND)
            continue;

        ret ++;
    }

    return ret;
}
/* Called from the IO thread */
static void sync_output_volumes_within_thread(pa_source *s) {
    pa_source_output *o;
    void *state = NULL;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);

    PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
        if (pa_cvolume_equal(&o->thread_info.soft_volume, &o->soft_volume))
            continue;

        o->thread_info.soft_volume = o->soft_volume;
        //pa_source_output_request_rewind(o, 0, true, false, false);
    }
}
/* Called from the IO thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void set_shared_volume_within_thread(pa_source *s) {
    pa_source_output *o;
    void *state = NULL;

    pa_source_assert_ref(s);

    PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);

    PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
            set_shared_volume_within_thread(o->destination_source);
    }
}
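/* pa_source_process_msg() implements the generic part of the source message
 * handler. Device implementations typically chain up to it from their own
 * process_msg() callback for any message they do not handle themselves. */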
/* Called from IO thread, except when it is not */
int pa_source_process_msg(pa_msgobject *object, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_source *s = PA_SOURCE(object);
    pa_source_assert_ref(s);

    switch ((pa_source_message_t) code) {
        case PA_SOURCE_MESSAGE_ADD_OUTPUT: {
            pa_source_output *o = PA_SOURCE_OUTPUT(userdata);

            pa_hashmap_put(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index), pa_source_output_ref(o));

            if (o->direct_on_input) {
                o->thread_info.direct_on_input = o->direct_on_input;
                pa_hashmap_put(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index), o);
            }

            pa_source_output_attach(o);

            pa_source_output_set_state_within_thread(o, o->state);

            if (o->thread_info.requested_source_latency != (pa_usec_t) -1)
                pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);

            pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);

            /* We don't just invalidate the requested latency here,
             * because if we are in a move we might need to fix up the
             * requested latency. */
            pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);

            /* In flat volume mode we need to update the volume as
             * well */
            return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }
        case PA_SOURCE_MESSAGE_REMOVE_OUTPUT: {
            pa_source_output *o = PA_SOURCE_OUTPUT(userdata);

            pa_source_output_set_state_within_thread(o, o->state);

            pa_source_output_detach(o);

            if (o->thread_info.direct_on_input) {
                pa_hashmap_remove(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index));
                o->thread_info.direct_on_input = NULL;
            }

            pa_hashmap_remove_and_free(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index));
            pa_source_invalidate_requested_latency(s, true);

            /* In flat volume mode we need to update the volume as
             * well */
            return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }
        case PA_SOURCE_MESSAGE_SET_SHARED_VOLUME: {
            pa_source *root_source = pa_source_get_master(s);

            if (PA_LIKELY(root_source))
                set_shared_volume_within_thread(root_source);

            return 0;
        }
        case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:

            if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
                s->set_volume(s);
                pa_source_volume_change_push(s);
            }
            /* Fall through ... */

        case PA_SOURCE_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
            }

            /* Fall through ... */

        case PA_SOURCE_MESSAGE_SYNC_VOLUMES:
            sync_output_volumes_within_thread(s);
            return 0;
        case PA_SOURCE_MESSAGE_GET_VOLUME:

            if ((s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume) {
                s->get_volume(s);
                pa_source_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
            }

            /* In case source implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
            }

            return 0;
        case PA_SOURCE_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
            }

            if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->set_mute)
                s->set_mute(s);

            return 0;
        case PA_SOURCE_MESSAGE_GET_MUTE:

            if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->get_mute)
                return s->get_mute(s, userdata);

            return 0;
        case PA_SOURCE_MESSAGE_SET_STATE: {

            bool suspend_change =
                (s->thread_info.state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SOURCE_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SOURCE_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            if (suspend_change) {
                pa_source_output *o;
                void *state = NULL;

                while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
                    if (o->suspend_within_thread)
                        o->suspend_within_thread(o, s->thread_info.state == PA_SOURCE_SUSPENDED);
            }

            return 0;
        }
        case PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_source_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }
        case PA_SOURCE_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            pa_source_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }
        case PA_SOURCE_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }
        case PA_SOURCE_MESSAGE_GET_FIXED_LATENCY:
            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SOURCE_MESSAGE_SET_FIXED_LATENCY:
            pa_source_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SOURCE_MESSAGE_GET_MAX_REWIND:
            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SOURCE_MESSAGE_SET_MAX_REWIND:
            pa_source_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;
        case PA_SOURCE_MESSAGE_GET_LATENCY:
            if (s->monitor_of) {
                *((int64_t*) userdata) = -pa_sink_get_latency_within_thread(s->monitor_of, true);
                return 0;
            }

            /* Implementors need to overwrite this implementation! */
            return -1;
        case PA_SOURCE_MESSAGE_SET_PORT:

            pa_assert(userdata);
            if (s->set_port) {
                struct source_message_set_port *msg_data = userdata;
                msg_data->ret = s->set_port(s, msg_data->port);
            }
            return 0;
        case PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            /* Make sure we're not messing with main thread when no longer linked */
            if (!PA_SOURCE_IS_LINKED(s->state))
                return 0;

            pa_source_get_volume(s, true);
            pa_source_get_mute(s, true);
            return 0;
        case PA_SOURCE_MESSAGE_SET_PORT_LATENCY_OFFSET:
            s->thread_info.port_latency_offset = offset;
            return 0;

        case PA_SOURCE_MESSAGE_MAX:
            ;
    }

    return -1;
}
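/* pa_source_suspend_all() suspends or resumes every real source for the given
 * cause; monitor sources are skipped, since they are handled together with
 * their sinks. Returns 0 on success, or the last error reported by
 * pa_source_suspend(). */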
/* Called from main thread */
int pa_source_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
    pa_source *source;
    uint32_t idx;
    int ret = 0;

    pa_core_assert_ref(c);
    pa_assert_ctl_context();
    pa_assert(cause != 0);

    for (source = PA_SOURCE(pa_idxset_first(c->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(c->sources, &idx))) {
        int r;

        if (source->monitor_of)
            continue;

        if ((r = pa_source_suspend(source, suspend, cause)) < 0)
            ret = r;
    }

    return ret;
}
/* Called from IO thread */
void pa_source_detach_within_thread(pa_source *s) {
    pa_source_output *o;
    void *state = NULL;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));

    PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
        pa_source_output_detach(o);
}
/* Called from IO thread */
void pa_source_attach_within_thread(pa_source *s) {
    pa_source_output *o;
    void *state = NULL;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));

    PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
        pa_source_output_attach(o);
}
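/* The requested latency is the smallest latency asked for by any connected
 * output, clamped to the source's configured latency range; (pa_usec_t) -1
 * means no output has expressed a preference. For fixed-latency sources the
 * fixed latency is used instead. The result is cached once the source is
 * linked. */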
/* Called from IO thread */
pa_usec_t pa_source_get_requested_latency_within_thread(pa_source *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_source_output *o;
    void *state = NULL;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);

    if (!(s->flags & PA_SOURCE_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
        if (o->thread_info.requested_source_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > o->thread_info.requested_source_latency))
            result = o->thread_info.requested_source_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
        /* Only cache this if we are fully set up */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = true;
    }

    return result;
}
/* Called from main thread */
pa_usec_t pa_source_get_requested_latency(pa_source *s) {
    pa_usec_t usec = 0;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if (s->state == PA_SOURCE_SUSPENDED)
        return 0;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);

    return usec;
}
/* Called from IO thread */
void pa_source_set_max_rewind_within_thread(pa_source *s, size_t max_rewind) {
    pa_source_output *o;
    void *state = NULL;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);

    if (max_rewind == s->thread_info.max_rewind)
        return;

    s->thread_info.max_rewind = max_rewind;

    if (PA_SOURCE_IS_LINKED(s->thread_info.state))
        PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
            pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
}
/* Called from main thread */
void pa_source_set_max_rewind(pa_source *s, size_t max_rewind) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    if (PA_SOURCE_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
    else
        pa_source_set_max_rewind_within_thread(s, max_rewind);
}
/* Called from IO thread */
void pa_source_invalidate_requested_latency(pa_source *s, bool dynamic) {
    pa_source_output *o;
    void *state = NULL;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);

    if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = false;
    else if (dynamic)
        return;

    if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {

        if (s->update_requested_latency)
            s->update_requested_latency(s);

        while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
            if (o->update_source_requested_latency)
                o->update_source_requested_latency(o);
    }

    if (s->monitor_of)
        pa_sink_invalidate_requested_latency(s->monitor_of, dynamic);
}
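/* A driver that supports dynamic latency typically advertises its range from
 * its own setup code before pa_source_put(); a purely illustrative call would
 * be:
 *
 *     pa_source_set_latency_range(s, 5 * PA_USEC_PER_MSEC, 2 * PA_USEC_PER_SEC);
 *
 * Passing 0 means "no limit"; values outside the absolute bounds are clamped
 * below. */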
/* Called from main thread */
void pa_source_set_latency_range(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    /* min_latency == 0:           no limit
     * min_latency anything else:  specified limit
     *
     * Similar for max_latency */

    if (min_latency < ABSOLUTE_MIN_LATENCY)
        min_latency = ABSOLUTE_MIN_LATENCY;

    if (max_latency <= 0 ||
        max_latency > ABSOLUTE_MAX_LATENCY)
        max_latency = ABSOLUTE_MAX_LATENCY;

    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SOURCE_DYNAMIC_LATENCY));

    if (PA_SOURCE_IS_LINKED(s->state)) {
        pa_usec_t r[2];

        r[0] = min_latency;
        r[1] = max_latency;

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
    } else
        pa_source_set_latency_range_within_thread(s, min_latency, max_latency);
}
/* Called from main thread */
void pa_source_get_latency_range(pa_source *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(min_latency);
    pa_assert(max_latency);

    if (PA_SOURCE_IS_LINKED(s->state)) {
        pa_usec_t r[2] = { 0, 0 };

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);

        *min_latency = r[0];
        *max_latency = r[1];
    } else {
        *min_latency = s->thread_info.min_latency;
        *max_latency = s->thread_info.max_latency;
    }
}
/* Called from IO thread, and from main thread before pa_source_put() is called */
void pa_source_set_latency_range_within_thread(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);

    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ||
              s->monitor_of);

    if (s->thread_info.min_latency == min_latency &&
        s->thread_info.max_latency == max_latency)
        return;

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
        pa_source_output *o;
        void *state = NULL;

        PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
            if (o->update_source_latency_range)
                o->update_source_latency_range(o);
    }

    pa_source_invalidate_requested_latency(s, false);
}
/* Called from main thread, before the source is put */
void pa_source_set_fixed_latency(pa_source *s, pa_usec_t latency) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    if (latency < ABSOLUTE_MIN_LATENCY)
        latency = ABSOLUTE_MIN_LATENCY;

    if (latency > ABSOLUTE_MAX_LATENCY)
        latency = ABSOLUTE_MAX_LATENCY;

    if (PA_SOURCE_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
    else
        s->thread_info.fixed_latency = latency;
}
/* Called from main thread */
pa_usec_t pa_source_get_fixed_latency(pa_source *s) {
    pa_usec_t latency;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    if (s->flags & PA_SOURCE_DYNAMIC_LATENCY)
        return 0;

    if (PA_SOURCE_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
    else
        latency = s->thread_info.fixed_latency;

    return latency;
}
/* Called from IO thread */
void pa_source_set_fixed_latency_within_thread(pa_source *s, pa_usec_t latency) {
    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);

    if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        s->thread_info.fixed_latency = 0;
        return;
    }

    pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(latency <= ABSOLUTE_MAX_LATENCY);

    if (s->thread_info.fixed_latency == latency)
        return;

    s->thread_info.fixed_latency = latency;

    if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
        pa_source_output *o;
        void *state = NULL;

        PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
            if (o->update_source_fixed_latency)
                o->update_source_fixed_latency(o);
    }

    pa_source_invalidate_requested_latency(s, false);
}
/* Called from main thread */
void pa_source_set_port_latency_offset(pa_source *s, int64_t offset) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    s->port_latency_offset = offset;

    if (PA_SOURCE_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
    else
        s->thread_info.port_latency_offset = offset;

    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_LATENCY_OFFSET_CHANGED], s);
}
/* Called from main thread */
size_t pa_source_get_max_rewind(pa_source *s) {
    size_t r;
    pa_assert_ctl_context();
    pa_source_assert_ref(s);

    if (!PA_SOURCE_IS_LINKED(s->state))
        return s->thread_info.max_rewind;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);

    return r;
}
/* Called from main context */
int pa_source_set_port(pa_source *s, const char *name, bool save) {
    pa_device_port *port;
    int ret;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    if (!s->set_port) {
        pa_log_debug("set_port() operation not implemented for source %u \"%s\"", s->index, s->name);
        return -PA_ERR_NOTIMPLEMENTED;
    }

    if (!name)
        return -PA_ERR_NOENTITY;

    if (!(port = pa_hashmap_get(s->ports, name)))
        return -PA_ERR_NOENTITY;

    if (s->active_port == port) {
        s->save_port = s->save_port || save;
        return 0;
    }

    if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
        struct source_message_set_port msg = { .port = port, .ret = 0 };
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
        ret = msg.ret;
    } else
        ret = s->set_port(s, port);

    if (ret < 0)
        return -PA_ERR_NOENTITY;

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    pa_log_info("Changed port of source %u \"%s\" to %s", s->index, s->name, port->name);

    s->active_port = port;
    s->save_port = save;

    /* The active port affects the default source selection. */
    pa_core_update_default_source(s->core);

    pa_source_set_port_latency_offset(s, s->active_port->latency_offset);

    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_CHANGED], s);

    return 0;
}
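/* pa_source_volume_change entries are recycled through a lock-free free list
 * so the IO thread does not have to call the allocator for every deferred
 * volume step; pa_xnew()/pa_xfree() are only used when the list is empty or
 * full. */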
PA_STATIC_FLIST_DECLARE(pa_source_volume_change, 0, pa_xfree);

/* Called from the IO thread. */
static pa_source_volume_change *pa_source_volume_change_new(pa_source *s) {
    pa_source_volume_change *c;
    if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_source_volume_change))))
        c = pa_xnew(pa_source_volume_change, 1);

    PA_LLIST_INIT(pa_source_volume_change, c);
    c->at = 0;
    pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
    return c;
}
/* Called from the IO thread. */
static void pa_source_volume_change_free(pa_source_volume_change *c) {
    pa_assert(c);
    if (pa_flist_push(PA_STATIC_FLIST_GET(pa_source_volume_change), c) < 0)
        pa_xfree(c);
}
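/* Deferred hardware volume handling: for PA_SOURCE_DEFERRED_VOLUME sources a
 * volume step is not applied immediately but queued with a timestamp and
 * written out later by pa_source_volume_change_apply(). The safety margin
 * below makes increases happen slightly late and decreases slightly early, so
 * that the change lines up better with the audio that is in flight. */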
/* Called from the IO thread. */
void pa_source_volume_change_push(pa_source *s) {
    pa_source_volume_change *c = NULL;
    pa_source_volume_change *nc = NULL;
    pa_source_volume_change *pc = NULL;
    uint32_t safety_margin = s->thread_info.volume_change_safety_margin;

    const char *direction = NULL;

    pa_assert(s);
    nc = pa_source_volume_change_new(s);

    /* NOTE: There are already more different volumes in pa_source than I can remember.
     *       Adding one more volume for HW would get us rid of this, but I am trying
     *       to survive with the ones we already have. */
    pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);

    if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
        pa_log_debug("Volume not changing");
        pa_source_volume_change_free(nc);
        return;
    }

    nc->at = pa_source_get_latency_within_thread(s, false);
    nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    if (s->thread_info.volume_changes_tail) {
        for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
            /* If volume is going up let's do it a bit late. If it is going
             * down let's do it a bit early. */
            if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
                if (nc->at + safety_margin > c->at) {
                    nc->at += safety_margin;
                    direction = "up";
                    break;
                }
            }
            else if (nc->at - safety_margin > c->at) {
                nc->at -= safety_margin;
                direction = "down";
                break;
            }
        }
    }

    if (c == NULL) {
        if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
            nc->at += safety_margin;
            direction = "up";
        } else {
            nc->at -= safety_margin;
            direction = "down";
        }
        PA_LLIST_PREPEND(pa_source_volume_change, s->thread_info.volume_changes, nc);
    }
    else {
        PA_LLIST_INSERT_AFTER(pa_source_volume_change, s->thread_info.volume_changes, c, nc);
    }

    pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);

    /* We can ignore volume events that came earlier but should happen later than this. */
    PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
        pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
        pa_source_volume_change_free(c);
    }

    nc->next = NULL;
    s->thread_info.volume_changes_tail = nc;
}
/* Called from the IO thread. */
static void pa_source_volume_change_flush(pa_source *s) {
    pa_source_volume_change *c = s->thread_info.volume_changes;
    pa_assert(s);
    s->thread_info.volume_changes = NULL;
    s->thread_info.volume_changes_tail = NULL;
    while (c) {
        pa_source_volume_change *next = c->next;
        pa_source_volume_change_free(c);
        c = next;
    }
}
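/* pa_source_volume_change_apply() applies every queued change whose timestamp
 * has passed, writes the newest one to the hardware via s->write_volume(), and
 * reports the time until the next pending change through *usec_to_next (0 if
 * there is none). Returns true if a change was written. */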
/* Called from the IO thread. */
bool pa_source_volume_change_apply(pa_source *s, pa_usec_t *usec_to_next) {
    pa_usec_t now;
    bool ret = false;

    pa_assert(s);

    if (!s->thread_info.volume_changes || !PA_SOURCE_IS_LINKED(s->state)) {
        if (usec_to_next)
            *usec_to_next = 0;
        return ret;
    }

    pa_assert(s->write_volume);

    now = pa_rtclock_now();

    while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
        pa_source_volume_change *c = s->thread_info.volume_changes;
        PA_LLIST_REMOVE(pa_source_volume_change, s->thread_info.volume_changes, c);
        pa_log_debug("Volume change to %d at %llu was written %llu usec late",
                     pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
        ret = true;
        s->thread_info.current_hw_volume = c->hw_volume;
        pa_source_volume_change_free(c);
    }

    if (ret)
        s->write_volume(s);

    if (s->thread_info.volume_changes) {
        if (usec_to_next)
            *usec_to_next = s->thread_info.volume_changes->at - now;
        if (pa_log_ratelimit(PA_LOG_DEBUG))
            pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
    } else {
        if (usec_to_next)
            *usec_to_next = 0;
        s->thread_info.volume_changes_tail = NULL;
    }
    return ret;
}
/* Called from the main thread */
/* Gets the list of formats supported by the source. The members and idxset must
 * be freed by the caller. */
pa_idxset* pa_source_get_formats(pa_source *s) {
    pa_idxset *ret;

    pa_assert(s);

    if (s->get_formats) {
        /* Source supports format query, all is good */
        ret = s->get_formats(s);
    } else {
        /* Source doesn't support format query, so assume it does PCM */
        pa_format_info *f = pa_format_info_new();
        f->encoding = PA_ENCODING_PCM;

        ret = pa_idxset_new(NULL, NULL);
        pa_idxset_put(ret, f, NULL);
    }

    return ret;
}
/* Called from the main thread */
/* Checks if the source can accept this format */
bool pa_source_check_format(pa_source *s, pa_format_info *f) {
    pa_idxset *formats = NULL;
    bool ret = false;

    pa_assert(s);
    pa_assert(f);

    formats = pa_source_get_formats(s);

    if (formats) {
        pa_format_info *finfo_device;
        uint32_t i;

        PA_IDXSET_FOREACH(finfo_device, formats, i) {
            if (pa_format_info_is_compatible(finfo_device, f)) {
                ret = true;
                break;
            }
        }

        pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
    }

    return ret;
}
/* Called from the main thread */
/* Calculates the intersection between formats supported by the source and
 * in_formats, and returns these, in the order of the source's formats. */
pa_idxset* pa_source_check_formats(pa_source *s, pa_idxset *in_formats) {
    pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *source_formats = NULL;
    pa_format_info *f_source, *f_in;
    uint32_t i, j;

    pa_assert(s);

    if (!in_formats || pa_idxset_isempty(in_formats))
        goto done;

    source_formats = pa_source_get_formats(s);

    PA_IDXSET_FOREACH(f_source, source_formats, i) {
        PA_IDXSET_FOREACH(f_in, in_formats, j) {
            if (pa_format_info_is_compatible(f_source, f_in))
                pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
        }
    }

done:
    if (source_formats)
        pa_idxset_free(source_formats, (pa_free_cb_t) pa_format_info_free);

    return out_formats;
}
/* Called from the main thread. */
void pa_source_set_reference_volume_direct(pa_source *s, const pa_cvolume *volume) {
    pa_cvolume old_volume;
    char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
    char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];

    pa_assert(s);
    pa_assert(volume);

    old_volume = s->reference_volume;

    if (pa_cvolume_equal(volume, &old_volume))
        return;

    s->reference_volume = *volume;
    pa_log_debug("The reference volume of source %s changed from %s to %s.", s->name,
                 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
                                            s->flags & PA_SOURCE_DECIBEL_VOLUME),
                 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
                                            s->flags & PA_SOURCE_DECIBEL_VOLUME));

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_VOLUME_CHANGED], s);
}