2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
28 #include <pulse/format.h>
29 #include <pulse/utf8.h>
30 #include <pulse/xmalloc.h>
31 #include <pulse/timeval.h>
32 #include <pulse/util.h>
33 #include <pulse/rtclock.h>
34 #include <pulse/internal.h>
36 #include <pulsecore/core-util.h>
37 #include <pulsecore/source-output.h>
38 #include <pulsecore/namereg.h>
39 #include <pulsecore/core-subscribe.h>
40 #include <pulsecore/log.h>
41 #include <pulsecore/mix.h>
42 #include <pulsecore/flist.h>
/* Hard lower/upper bounds and the default fixed latency for a source.
 * Units are microseconds (see the PA_USEC_* factors). */
46 #define ABSOLUTE_MIN_LATENCY (500)
47 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
48 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* pa_source is a public class derived from pa_msgobject. */
50 PA_DEFINE_PUBLIC_CLASS(pa_source, pa_msgobject);
/* Node describing one queued (deferred) hardware volume change; chained
 * into a linked list per source. (Struct body partially elided here.) */
52 struct pa_source_volume_change {
56 PA_LLIST_FIELDS(pa_source_volume_change);
/* Message payload used with PA_SOURCE_MESSAGE_SET_PORT (see
 * pa_source_suspend below, which fills .port and .ret). */
59 struct source_message_set_port {
/* Forward declarations: destructor and deferred-volume helpers. */
64 static void source_free(pa_object *o);
66 static void pa_source_volume_change_push(pa_source *s);
67 static void pa_source_volume_change_flush(pa_source *s);
/* Initialize a caller-provided pa_source_new_data: fresh property list and
 * a string-keyed ports hashmap whose values are released with
 * pa_device_port_unref. (Interior lines elided in this extract.) */
69 pa_source_new_data* pa_source_new_data_init(pa_source_new_data *data) {
73 data->proplist = pa_proplist_new();
74 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
/* Store a private copy of 'name' (the name to register) in the new-data
 * struct. */
79 void pa_source_new_data_set_name(pa_source_new_data *data, const char *name) {
83 data->name = pa_xstrdup(name);
/* Copy 'spec' into the new-data struct; a NULL spec only clears the
 * sample_spec_is_set flag and leaves the stored spec untouched. */
86 void pa_source_new_data_set_sample_spec(pa_source_new_data *data, const pa_sample_spec *spec) {
89 if ((data->sample_spec_is_set = !!spec))
90 data->sample_spec = *spec;
/* Copy 'map' into the new-data struct; a NULL map only clears the
 * channel_map_is_set flag and leaves the stored map untouched. */
93 void pa_source_new_data_set_channel_map(pa_source_new_data *data, const pa_channel_map *map) {
96 if ((data->channel_map_is_set = !!map))
97 data->channel_map = *map;
/* Record an explicit alternate sample rate. Unlike the pointer-based
 * setters above, this takes a value and therefore always marks it set. */
100 void pa_source_new_data_set_alternate_sample_rate(pa_source_new_data *data, const uint32_t alternate_sample_rate) {
103 data->alternate_sample_rate_is_set = true;
104 data->alternate_sample_rate = alternate_sample_rate;
/* Copy the initial volume into the new-data struct; a NULL volume only
 * clears the volume_is_set flag. */
107 void pa_source_new_data_set_volume(pa_source_new_data *data, const pa_cvolume *volume) {
110 if ((data->volume_is_set = !!volume))
111 data->volume = *volume;
/* Mark the initial mute state as explicitly provided. NOTE(review): the
 * assignment of the 'mute' value itself is on a line elided from this
 * extract. */
114 void pa_source_new_data_set_muted(pa_source_new_data *data, bool mute) {
117 data->muted_is_set = true;
/* Replace the requested active port. Frees any previously stored string
 * before duplicating the new one, so repeated calls do not leak. */
121 void pa_source_new_data_set_port(pa_source_new_data *data, const char *port) {
124 pa_xfree(data->active_port);
125 data->active_port = pa_xstrdup(port);
/* Release everything owned by the new-data struct: property list, ports
 * hashmap (port refs dropped via its free callback), and the name and
 * active-port strings. */
128 void pa_source_new_data_done(pa_source_new_data *data) {
131 pa_proplist_free(data->proplist);
134 pa_hashmap_free(data->ports);
136 pa_xfree(data->name);
137 pa_xfree(data->active_port);
140 /* Called from main context */
/* Reset all implementor-provided callbacks to NULL so a freshly created
 * (or reset) source has no backend hooks installed. */
141 static void reset_callbacks(pa_source *s) {
145 s->get_volume = NULL;
146 s->set_volume = NULL;
147 s->write_volume = NULL;
150 s->update_requested_latency = NULL;
152 s->get_formats = NULL;
153 s->update_rate = NULL;
156 /* Called from main context */
/* Create a new pa_source from 'data': register its name, fire the NEW and
 * FIXATE hooks (either may veto), validate the sample spec / channel map /
 * volume and fill in defaults, then initialize the main-thread and
 * IO-thread-side fields and add the source to the core. Returns NULL on
 * failure. NOTE(review): many lines are elided from this extract; the
 * comments below describe only what is visible. */
157 pa_source* pa_source_new(
159 pa_source_new_data *data,
160 pa_source_flags_t flags) {
164 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
169 pa_assert(data->name);
170 pa_assert_ctl_context();
/* Allocate the message object and claim the requested name. */
172 s = pa_msgobject_new(pa_source);
174 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SOURCE, s, data->namereg_fail))) {
175 pa_log_debug("Failed to register name %s.", data->name);
180 pa_source_new_data_set_name(data, name);
/* Let modules inspect/veto the new source before it is fixed up. */
182 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_NEW], data) < 0) {
184 pa_namereg_unregister(core, name);
188 /* FIXME, need to free s here on failure */
/* Validate the caller-supplied data and fill in defaults. */
190 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
191 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
193 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
195 if (!data->channel_map_is_set)
196 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
198 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
199 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
201 /* FIXME: There should probably be a general function for checking whether
202 * the source volume is allowed to be set, like there is for source outputs. */
203 pa_assert(!data->volume_is_set || !(flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
205 if (!data->volume_is_set) {
206 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
207 data->save_volume = false;
210 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
211 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
213 if (!data->muted_is_set)
/* Inherit card properties and standard device metadata. */
217 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
219 pa_device_init_description(data->proplist, data->card);
220 pa_device_init_icon(data->proplist, false);
221 pa_device_init_intended_roles(data->proplist);
/* Pick the best-priority port when none was requested explicitly. */
223 if (!data->active_port) {
224 pa_device_port *p = pa_device_port_find_best(data->ports);
226 pa_source_new_data_set_port(data, p->name);
229 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_FIXATE], data) < 0) {
231 pa_namereg_unregister(core, name);
/* Hooks accepted the source: copy the fixed-up data into the object. */
235 s->parent.parent.free = source_free;
236 s->parent.process_msg = pa_source_process_msg;
239 s->state = PA_SOURCE_INIT;
242 s->suspend_cause = data->suspend_cause;
243 pa_source_set_mixer_dirty(s, false);
244 s->name = pa_xstrdup(name);
245 s->proplist = pa_proplist_copy(data->proplist);
246 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
247 s->module = data->module;
248 s->card = data->card;
250 s->priority = pa_device_init_priority(s->proplist);
252 s->sample_spec = data->sample_spec;
253 s->channel_map = data->channel_map;
254 s->default_sample_rate = s->sample_spec.rate;
256 if (data->alternate_sample_rate_is_set)
257 s->alternate_sample_rate = data->alternate_sample_rate;
259 s->alternate_sample_rate = s->core->alternate_sample_rate;
/* An alternate rate equal to the default is useless; disable it. */
261 if (s->sample_spec.rate == s->alternate_sample_rate) {
262 pa_log_warn("Default and alternate sample rates are the same.");
263 s->alternate_sample_rate = 0;
266 s->outputs = pa_idxset_new(NULL, NULL);
268 s->monitor_of = NULL;
269 s->output_from_master = NULL;
271 s->reference_volume = s->real_volume = data->volume;
272 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
273 s->base_volume = PA_VOLUME_NORM;
274 s->n_volume_steps = PA_VOLUME_NORM+1;
275 s->muted = data->muted;
276 s->refresh_volume = s->refresh_muted = false;
283 /* As a minor optimization we just steal the list instead of
285 s->ports = data->ports;
288 s->active_port = NULL;
289 s->save_port = false;
/* Resolve the requested active port against the (stolen) ports map. */
291 if (data->active_port)
292 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
293 s->save_port = data->save_port;
295 /* Hopefully the active port has already been assigned in the previous call
296 to pa_device_port_find_best, but better safe than sorry */
298 s->active_port = pa_device_port_find_best(s->ports);
301 s->latency_offset = s->active_port->latency_offset;
303 s->latency_offset = 0;
305 s->save_volume = data->save_volume;
306 s->save_muted = data->save_muted;
308 pa_silence_memchunk_get(
309 &core->silence_cache,
/* IO-thread-side mirror of the state set up above. */
315 s->thread_info.rtpoll = NULL;
316 s->thread_info.outputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
317 (pa_free_cb_t) pa_source_output_unref);
318 s->thread_info.soft_volume = s->soft_volume;
319 s->thread_info.soft_muted = s->muted;
320 s->thread_info.state = s->state;
321 s->thread_info.max_rewind = 0;
322 s->thread_info.requested_latency_valid = false;
323 s->thread_info.requested_latency = 0;
324 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
325 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
326 s->thread_info.fixed_latency = flags & PA_SOURCE_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
/* Deferred (timed) hardware volume change bookkeeping. */
328 PA_LLIST_HEAD_INIT(pa_source_volume_change, s->thread_info.volume_changes);
329 s->thread_info.volume_changes_tail = NULL;
330 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
331 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
332 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
333 s->thread_info.latency_offset = s->latency_offset;
335 /* FIXME: This should probably be moved to pa_source_put() */
336 pa_assert_se(pa_idxset_put(core->sources, s, &s->index) >= 0);
339 pa_assert_se(pa_idxset_put(s->card->sources, s, NULL) >= 0);
341 pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
342 pa_log_info("Created source %u \"%s\" with sample spec %s and channel map %s\n    %s",
345 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
346 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
353 /* Called from main context */
/* Move the source to 'state': first through the implementor's set_state()
 * callback, then by synchronously telling the IO thread, rolling the
 * implementor state back if the IO thread refuses. On success fires
 * STATE_CHANGED + a change event (except towards UNLINKED), and on a
 * suspend/resume transition notifies each output (killing outputs flagged
 * KILL_ON_SUSPEND when suspending). */
354 static int source_set_state(pa_source *s, pa_source_state_t state) {
357 pa_source_state_t original_state;
360 pa_assert_ctl_context();
362 if (s->state == state)
365 original_state = s->state;
/* A "suspend change" is a transition between SUSPENDED and an opened state. */
368 (original_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(state)) ||
369 (PA_SOURCE_IS_OPENED(original_state) && state == PA_SOURCE_SUSPENDED);
372 if ((ret = s->set_state(s, state)) < 0)
376 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
/* IO thread rejected the transition: undo the implementor-side change. */
379 s->set_state(s, original_state);
386 if (state != PA_SOURCE_UNLINKED) { /* if we enter UNLINKED state pa_source_unlink() will fire the appropriate events */
387 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_STATE_CHANGED], s);
388 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
391 if (suspend_change) {
395 /* We're suspending or resuming, tell everyone about it */
397 PA_IDXSET_FOREACH(o, s->outputs, idx)
398 if (s->state == PA_SOURCE_SUSPENDED &&
399 (o->flags & PA_SOURCE_OUTPUT_KILL_ON_SUSPEND))
400 pa_source_output_kill(o);
402 o->suspend(o, state == PA_SOURCE_SUSPENDED);
/* Install the implementor's get_volume callback. (Body elided in this
 * extract.) */
408 void pa_source_set_get_volume_callback(pa_source *s, pa_source_cb_t cb) {
/* Install (or clear) the implementor's set_volume callback and keep
 * PA_SOURCE_HW_VOLUME_CTRL in sync with it, adjusting decibel-volume
 * support accordingly and notifying clients if flags changed after init. */
414 void pa_source_set_set_volume_callback(pa_source *s, pa_source_cb_t cb) {
415 pa_source_flags_t flags;
/* A write_volume callback requires set_volume to exist. */
418 pa_assert(!s->write_volume || cb);
422 /* Save the current flags so we can tell if they've changed */
426 /* The source implementor is responsible for setting decibel volume support */
427 s->flags |= PA_SOURCE_HW_VOLUME_CTRL;
429 s->flags &= ~PA_SOURCE_HW_VOLUME_CTRL;
430 /* See note below in pa_source_put() about volume sharing and decibel volumes */
431 pa_source_enable_decibel_volume(s, !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
434 /* If the flags have changed after init, let any clients know via a change event */
435 if (s->state != PA_SOURCE_INIT && flags != s->flags)
436 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install (or clear) the deferred write_volume callback and keep
 * PA_SOURCE_DEFERRED_VOLUME in sync; a non-NULL callback requires a
 * set_volume callback to already be installed. */
439 void pa_source_set_write_volume_callback(pa_source *s, pa_source_cb_t cb) {
440 pa_source_flags_t flags;
443 pa_assert(!cb || s->set_volume);
445 s->write_volume = cb;
447 /* Save the current flags so we can tell if they've changed */
451 s->flags |= PA_SOURCE_DEFERRED_VOLUME;
453 s->flags &= ~PA_SOURCE_DEFERRED_VOLUME;
455 /* If the flags have changed after init, let any clients know via a change event */
456 if (s->state != PA_SOURCE_INIT && flags != s->flags)
457 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install the implementor's get_mute callback. (Body elided in this
 * extract.) */
460 void pa_source_set_get_mute_callback(pa_source *s, pa_source_get_mute_cb_t cb) {
/* Install (or clear) the implementor's set_mute callback and keep
 * PA_SOURCE_HW_MUTE_CTRL in sync, notifying clients on post-init flag
 * changes. */
466 void pa_source_set_set_mute_callback(pa_source *s, pa_source_cb_t cb) {
467 pa_source_flags_t flags;
473 /* Save the current flags so we can tell if they've changed */
477 s->flags |= PA_SOURCE_HW_MUTE_CTRL;
479 s->flags &= ~PA_SOURCE_HW_MUTE_CTRL;
481 /* If the flags have changed after init, let any clients know via a change event */
482 if (s->state != PA_SOURCE_INIT && flags != s->flags)
483 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Set or clear PA_SOURCE_FLAT_VOLUME. The request is additionally gated on
 * the core-wide flat_volumes preference; clients are notified if the flags
 * changed after init. */
486 static void enable_flat_volume(pa_source *s, bool enable) {
487 pa_source_flags_t flags;
491 /* Always follow the overall user preference here */
492 enable = enable && s->core->flat_volumes;
494 /* Save the current flags so we can tell if they've changed */
498 s->flags |= PA_SOURCE_FLAT_VOLUME;
500 s->flags &= ~PA_SOURCE_FLAT_VOLUME;
502 /* If the flags have changed after init, let any clients know via a change event */
503 if (s->state != PA_SOURCE_INIT && flags != s->flags)
504 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Set or clear PA_SOURCE_DECIBEL_VOLUME. Flat volume follows decibel
 * support: enabled together, disabled together. Clients are notified on
 * post-init flag changes. */
507 void pa_source_enable_decibel_volume(pa_source *s, bool enable) {
508 pa_source_flags_t flags;
512 /* Save the current flags so we can tell if they've changed */
516 s->flags |= PA_SOURCE_DECIBEL_VOLUME;
517 enable_flat_volume(s, true);
519 s->flags &= ~PA_SOURCE_DECIBEL_VOLUME;
520 enable_flat_volume(s, false);
523 /* If the flags have changed after init, let any clients know via a change event */
524 if (s->state != PA_SOURCE_INIT && flags != s->flags)
525 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
528 /* Called from main context */
/* Finish publishing a source created with pa_source_new(): verify the
 * implementor initialized everything required, reconcile the volume-related
 * flags, pull reference/real volume from the master for volume-sharing
 * sources, move the source out of INIT (to SUSPENDED or IDLE), and announce
 * it via a NEW event and the SOURCE_PUT hook. */
529 void pa_source_put(pa_source *s) {
530 pa_source_assert_ref(s);
531 pa_assert_ctl_context();
533 pa_assert(s->state == PA_SOURCE_INIT);
534 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || pa_source_is_filter(s));
536 /* The following fields must be initialized properly when calling _put() */
537 pa_assert(s->asyncmsgq);
538 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
540 /* Generally, flags should be initialized via pa_source_new(). As a
541 * special exception we allow some volume related flags to be set
542 * between _new() and _put() by the callback setter functions above.
544 * Thus we implement a couple safeguards here which ensure the above
545 * setters were used (or at least the implementor made manual changes
546 * in a compatible way).
548 * Note: All of these flags set here can change over the life time
550 pa_assert(!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) || s->set_volume);
551 pa_assert(!(s->flags & PA_SOURCE_DEFERRED_VOLUME) || s->write_volume);
552 pa_assert(!(s->flags & PA_SOURCE_HW_MUTE_CTRL) || s->set_mute);
554 /* XXX: Currently decibel volume is disabled for all sources that use volume
555 * sharing. When the master source supports decibel volume, it would be good
556 * to have the flag also in the filter source, but currently we don't do that
557 * so that the flags of the filter source never change when it's moved from
558 * a master source to another. One solution for this problem would be to
559 * remove user-visible volume altogether from filter sources when volume
560 * sharing is used, but the current approach was easier to implement... */
561 /* We always support decibel volumes in software, otherwise we leave it to
562 * the source implementor to set this flag as needed.
564 * Note: This flag can also change over the life time of the source. */
565 if (!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
566 pa_source_enable_decibel_volume(s, true);
567 s->soft_volume = s->reference_volume;
570 /* If the source implementor support DB volumes by itself, we should always
571 * try and enable flat volumes too */
572 if ((s->flags & PA_SOURCE_DECIBEL_VOLUME))
573 enable_flat_volume(s, true);
575 if (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) {
/* Volume-sharing filter: mirror the root source's volumes, remapped
 * into this source's channel map. */
576 pa_source *root_source = pa_source_get_master(s);
578 pa_assert(PA_LIKELY(root_source));
580 s->reference_volume = root_source->reference_volume;
581 pa_cvolume_remap(&s->reference_volume, &root_source->channel_map, &s->channel_map);
583 s->real_volume = root_source->real_volume;
584 pa_cvolume_remap(&s->real_volume, &root_source->channel_map, &s->channel_map);
586 /* We assume that if the sink implementor changed the default
587 * volume he did so in real_volume, because that is the usual
588 * place where he is supposed to place his changes. */
589 s->reference_volume = s->real_volume;
591 s->thread_info.soft_volume = s->soft_volume;
592 s->thread_info.soft_muted = s->muted;
593 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
/* Sanity-check the flag/field combinations set up above. */
595 pa_assert((s->flags & PA_SOURCE_HW_VOLUME_CTRL)
596 || (s->base_volume == PA_VOLUME_NORM
597 && ((s->flags & PA_SOURCE_DECIBEL_VOLUME || (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)))));
598 pa_assert(!(s->flags & PA_SOURCE_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
599 pa_assert(!(s->flags & PA_SOURCE_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
/* Leave INIT: suspended if a suspend cause is pending, idle otherwise. */
601 if (s->suspend_cause)
602 pa_assert_se(source_set_state(s, PA_SOURCE_SUSPENDED) == 0);
604 pa_assert_se(source_set_state(s, PA_SOURCE_IDLE) == 0);
606 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_NEW, s->index);
607 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PUT], s);
610 /* Called from main context */
/* Disconnect the source from the core: fire the UNLINK hook, unregister
 * its name, remove it from the core (and card) source sets, kill all
 * remaining outputs, transition to UNLINKED, then post the REMOVE event
 * and the UNLINK_POST hook. */
611 void pa_source_unlink(pa_source *s) {
613 pa_source_output *o, PA_UNUSED *j = NULL;
616 pa_assert_ctl_context();
618 /* See pa_sink_unlink() for a couple of comments how this function
621 linked = PA_SOURCE_IS_LINKED(s->state);
624 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], s);
626 if (s->state != PA_SOURCE_UNLINKED)
627 pa_namereg_unregister(s->core, s->name);
628 pa_idxset_remove_by_data(s->core->sources, s, NULL);
631 pa_idxset_remove_by_data(s->card->sources, s, NULL);
/* Kill every remaining output; each kill removes it from s->outputs. */
633 while ((o = pa_idxset_first(s->outputs, NULL))) {
635 pa_source_output_kill(o);
640 source_set_state(s, PA_SOURCE_UNLINKED);
642 s->state = PA_SOURCE_UNLINKED;
647 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
648 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK_POST], s);
652 /* Called from main context */
/* Destructor installed as parent.parent.free in pa_source_new(); runs when
 * the last reference is dropped. Flushes pending volume changes and
 * releases the output sets, silence memblock, proplist and ports map. */
653 static void source_free(pa_object *o) {
654 pa_source *s = PA_SOURCE(o);
657 pa_assert_ctl_context();
/* Must only be called once all references are gone. */
658 pa_assert(pa_source_refcnt(s) == 0);
660 if (PA_SOURCE_IS_LINKED(s->state))
663 pa_log_info("Freeing source %u \"%s\"", s->index, s->name);
665 pa_source_volume_change_flush(s);
667 pa_idxset_free(s->outputs, NULL);
668 pa_hashmap_free(s->thread_info.outputs);
670 if (s->silence.memblock)
671 pa_memblock_unref(s->silence.memblock);
677 pa_proplist_free(s->proplist);
680 pa_hashmap_free(s->ports);
685 /* Called from main context, and not while the IO thread is active, please */
/* Attach the async message queue used to talk to the IO thread. (Body
 * partially elided in this extract.) */
686 void pa_source_set_asyncmsgq(pa_source *s, pa_asyncmsgq *q) {
687 pa_source_assert_ref(s);
688 pa_assert_ctl_context();
693 /* Called from main context, and not while the IO thread is active, please */
/* Update the bits of s->flags selected by 'mask' to 'value'. Only the
 * LATENCY and DYNAMIC_LATENCY flags may be changed. Logs transitions,
 * posts a change event, fires FLAGS_CHANGED, and recurses into any filter
 * sources attached via the outputs' destination_source. */
694 void pa_source_update_flags(pa_source *s, pa_source_flags_t mask, pa_source_flags_t value) {
695 pa_source_flags_t old_flags;
696 pa_source_output *output;
699 pa_source_assert_ref(s);
700 pa_assert_ctl_context();
702 /* For now, allow only a minimal set of flags to be changed. */
703 pa_assert((mask & ~(PA_SOURCE_DYNAMIC_LATENCY|PA_SOURCE_LATENCY)) == 0);
705 old_flags = s->flags;
706 s->flags = (s->flags & ~mask) | (value & mask);
/* Nothing actually changed: skip the notifications below. */
708 if (s->flags == old_flags)
711 if ((s->flags & PA_SOURCE_LATENCY) != (old_flags & PA_SOURCE_LATENCY))
712 pa_log_debug("Source %s: LATENCY flag %s.", s->name, (s->flags & PA_SOURCE_LATENCY) ? "enabled" : "disabled");
714 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY) != (old_flags & PA_SOURCE_DYNAMIC_LATENCY))
715 pa_log_debug("Source %s: DYNAMIC_LATENCY flag %s.",
716 s->name, (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ? "enabled" : "disabled");
718 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
719 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_FLAGS_CHANGED], s);
/* Propagate the flag change into attached filter sources. */
721 PA_IDXSET_FOREACH(output, s->outputs, idx) {
722 if (output->destination_source)
723 pa_source_update_flags(output->destination_source, mask, value);
727 /* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object the IO thread runs on. */
728 void pa_source_set_rtpoll(pa_source *s, pa_rtpoll *p) {
729 pa_source_assert_ref(s);
730 pa_source_assert_io_context(s);
732 s->thread_info.rtpoll = p;
735 /* Called from main context */
/* Recompute RUNNING vs IDLE from whether anybody is using the source.
 * Suspended sources are left alone. */
736 int pa_source_update_status(pa_source*s) {
737 pa_source_assert_ref(s);
738 pa_assert_ctl_context();
739 pa_assert(PA_SOURCE_IS_LINKED(s->state));
741 if (s->state == PA_SOURCE_SUSPENDED)
744 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
747 /* Called from any context - must be threadsafe */
/* Atomically mark whether mixer settings need to be rewritten when the
 * mixer becomes accessible again (consumed in pa_source_suspend()). */
748 void pa_source_set_mixer_dirty(pa_source *s, bool is_dirty) {
749 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
752 /* Called from main context */
/* Add or remove 'cause' from the source's suspend-cause mask and move the
 * source to the matching state. Monitor sources only accept the
 * PASSTHROUGH cause. When resuming with a dirty mixer, the active port is
 * re-applied first (via the IO thread when deferred volume is in use). */
753 int pa_source_suspend(pa_source *s, bool suspend, pa_suspend_cause_t cause) {
754 pa_source_assert_ref(s);
755 pa_assert_ctl_context();
756 pa_assert(PA_SOURCE_IS_LINKED(s->state));
757 pa_assert(cause != 0);
759 if (s->monitor_of && cause != PA_SUSPEND_PASSTHROUGH)
760 return -PA_ERR_NOTSUPPORTED;
763 s->suspend_cause |= cause;
765 s->suspend_cause &= ~cause;
/* Mixer became accessible again while flagged dirty: resync it. */
767 if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
768 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
769 it'll be handled just fine. */
770 pa_source_set_mixer_dirty(s, false);
771 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
772 if (s->active_port && s->set_port) {
773 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
774 struct source_message_set_port msg = { .port = s->active_port, .ret = 0 };
775 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
778 s->set_port(s, s->active_port);
/* Already in the state the cause mask implies: nothing to do. */
788 if ((pa_source_get_state(s) == PA_SOURCE_SUSPENDED) == !!s->suspend_cause)
791 pa_log_debug("Suspend cause of source %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
793 if (s->suspend_cause)
794 return source_set_state(s, PA_SOURCE_SUSPENDED);
796 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
799 /* Called from main context */
/* For a monitor source: mirror the suspend state of the sink it monitors
 * (suspended sink -> suspended source, opened sink -> RUNNING/IDLE). */
800 int pa_source_sync_suspend(pa_source *s) {
801 pa_sink_state_t state;
803 pa_source_assert_ref(s);
804 pa_assert_ctl_context();
805 pa_assert(PA_SOURCE_IS_LINKED(s->state));
/* Only valid for monitor sources. */
806 pa_assert(s->monitor_of);
808 state = pa_sink_get_state(s->monitor_of);
810 if (state == PA_SINK_SUSPENDED)
811 return source_set_state(s, PA_SOURCE_SUSPENDED);
813 pa_assert(PA_SINK_IS_OPENED(state));
815 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
818 /* Called from main context */
/* Begin moving all outputs away from this source: each output that
 * successfully starts its move is collected (into 'q'); the temporary ref
 * taken here is dropped for outputs that refused to move. */
819 pa_queue *pa_source_move_all_start(pa_source *s, pa_queue *q) {
820 pa_source_output *o, *n;
823 pa_source_assert_ref(s);
824 pa_assert_ctl_context();
825 pa_assert(PA_SOURCE_IS_LINKED(s->state));
/* Fetch the successor before touching 'o': starting a move detaches it
 * from s->outputs, which would invalidate plain iteration. */
830 for (o = PA_SOURCE_OUTPUT(pa_idxset_first(s->outputs, &idx)); o; o = n) {
831 n = PA_SOURCE_OUTPUT(pa_idxset_next(s->outputs, &idx));
833 pa_source_output_ref(o);
835 if (pa_source_output_start_move(o) >= 0)
838 pa_source_output_unref(o);
844 /* Called from main context */
/* Complete a move started with pa_source_move_all_start(): attach each
 * queued output to source 's' (falling back to fail_move on error), drop
 * the ref taken at move start, and free the queue. */
845 void pa_source_move_all_finish(pa_source *s, pa_queue *q, bool save) {
848 pa_source_assert_ref(s);
849 pa_assert_ctl_context();
850 pa_assert(PA_SOURCE_IS_LINKED(s->state));
853 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
854 if (pa_source_output_finish_move(o, s, save) < 0)
855 pa_source_output_fail_move(o);
857 pa_source_output_unref(o);
860 pa_queue_free(q, NULL);
863 /* Called from main context */
/* Abort a move started with pa_source_move_all_start(): fail each queued
 * output, drop its ref, and free the queue. */
864 void pa_source_move_all_fail(pa_queue *q) {
867 pa_assert_ctl_context();
870 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
871 pa_source_output_fail_move(o);
872 pa_source_output_unref(o);
875 pa_queue_free(q, NULL);
878 /* Called from IO thread context */
/* Propagate a rewind of 'nbytes' to every attached output. Suspended
 * sources skip the propagation. */
879 void pa_source_process_rewind(pa_source *s, size_t nbytes) {
883 pa_source_assert_ref(s);
884 pa_source_assert_io_context(s);
885 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
890 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
893 pa_log_debug("Processing rewind...");
895 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
896 pa_source_output_assert_ref(o);
897 pa_source_output_process_rewind(o, nbytes);
901 /* Called from IO thread context */
/* Push captured audio in 'chunk' to all attached outputs (except those
 * marked direct_on_input, which are fed via pa_source_post_direct()).
 * If soft mute or a non-unity soft volume applies, a writable copy of the
 * chunk is silenced/attenuated first; otherwise the chunk is passed
 * through untouched. Suspended sources drop the data. */
902 void pa_source_post(pa_source*s, const pa_memchunk *chunk) {
906 pa_source_assert_ref(s);
907 pa_source_assert_io_context(s);
908 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
911 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
914 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
/* Take a private writable copy so attenuation does not clobber the
 * caller's memblock. */
915 pa_memchunk vchunk = *chunk;
917 pa_memblock_ref(vchunk.memblock);
918 pa_memchunk_make_writable(&vchunk, 0);
920 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
921 pa_silence_memchunk(&vchunk, &s->sample_spec);
923 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
925 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
926 pa_source_output_assert_ref(o);
928 if (!o->thread_info.direct_on_input)
929 pa_source_output_push(o, &vchunk);
932 pa_memblock_unref(vchunk.memblock);
/* Fast path: no soft mute/volume, push the chunk unmodified. */
935 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
936 pa_source_output_assert_ref(o);
938 if (!o->thread_info.direct_on_input)
939 pa_source_output_push(o, chunk);
944 /* Called from IO thread context */
/* Push captured audio directly to a single direct-on-input output,
 * applying the same soft mute/volume handling as pa_source_post().
 * Suspended sources drop the data. */
945 void pa_source_post_direct(pa_source*s, pa_source_output *o, const pa_memchunk *chunk) {
946 pa_source_assert_ref(s);
947 pa_source_assert_io_context(s);
948 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
949 pa_source_output_assert_ref(o);
/* This entry point is only for outputs fed directly from an input. */
950 pa_assert(o->thread_info.direct_on_input);
953 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
956 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
/* Private writable copy so attenuation cannot clobber the caller's
 * memblock. */
957 pa_memchunk vchunk = *chunk;
959 pa_memblock_ref(vchunk.memblock);
960 pa_memchunk_make_writable(&vchunk, 0);
962 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
963 pa_silence_memchunk(&vchunk, &s->sample_spec);
965 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
967 pa_source_output_push(o, &vchunk);
969 pa_memblock_unref(vchunk.memblock);
971 pa_source_output_push(o, chunk);
974 /* Called from main thread */
/* Try to switch the source to 'rate'. The request may be redirected to the
 * configured default or alternate rate when the exact rate isn't needed
 * (non-passthrough, using the 11025/4000 Hz divisibility families). The
 * switch is refused while the source (or, for monitor sources, its sink)
 * is RUNNING. The source is suspended around the actual rate change, and
 * corked outputs get their rates updated afterwards. */
975 int pa_source_update_rate(pa_source *s, uint32_t rate, bool passthrough) {
977 uint32_t desired_rate = rate;
978 uint32_t default_rate = s->default_sample_rate;
979 uint32_t alternate_rate = s->alternate_sample_rate;
980 bool default_rate_is_usable = false;
981 bool alternate_rate_is_usable = false;
/* Already at the requested rate: nothing to do. */
983 if (rate == s->sample_spec.rate)
/* No way to change the rate: neither an update_rate() callback nor a
 * monitored sink to delegate to. */
986 if (!s->update_rate && !s->monitor_of)
989 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough)) {
990 pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
994 if (PA_SOURCE_IS_RUNNING(s->state)) {
995 pa_log_info("Cannot update rate, SOURCE_IS_RUNNING, will keep using %u Hz",
996 s->sample_spec.rate);
1000 if (s->monitor_of) {
1001 if (PA_SINK_IS_RUNNING(s->monitor_of->state)) {
1002 pa_log_info("Cannot update rate, this is a monitor source and the sink is running.");
1007 if (PA_UNLIKELY(!pa_sample_rate_valid(desired_rate)))
/* Non-passthrough and the exact rate isn't configured: fall back to
 * default/alternate if one shares a rate family (multiples of 11025 or
 * 4000 Hz) with the request. */
1010 if (!passthrough && default_rate != desired_rate && alternate_rate != desired_rate) {
1011 if (default_rate % 11025 == 0 && desired_rate % 11025 == 0)
1012 default_rate_is_usable = true;
1013 if (default_rate % 4000 == 0 && desired_rate % 4000 == 0)
1014 default_rate_is_usable = true;
1015 if (alternate_rate && alternate_rate % 11025 == 0 && desired_rate % 11025 == 0)
1016 alternate_rate_is_usable = true;
1017 if (alternate_rate && alternate_rate % 4000 == 0 && desired_rate % 4000 == 0)
1018 alternate_rate_is_usable = true;
1020 if (alternate_rate_is_usable && !default_rate_is_usable)
1021 desired_rate = alternate_rate;
1023 desired_rate = default_rate;
1026 if (desired_rate == s->sample_spec.rate)
1029 if (!passthrough && pa_source_used_by(s) > 0)
/* Suspend around the actual rate change. */
1032 pa_log_debug("Suspending source %s due to changing the sample rate.", s->name);
1033 pa_source_suspend(s, true, PA_SUSPEND_INTERNAL);
1036 ret = s->update_rate(s, desired_rate);
1038 /* This is a monitor source. */
1040 /* XXX: This code is written with non-passthrough streams in mind. I
1041 * have no idea whether the behaviour with passthrough streams is
1044 uint32_t old_rate = s->sample_spec.rate;
1046 s->sample_spec.rate = desired_rate;
1047 ret = pa_sink_update_rate(s->monitor_of, desired_rate, false);
1050 /* Changing the sink rate failed, roll back the old rate for
1051 * the monitor source. Why did we set the source rate before
1052 * calling pa_sink_update_rate(), you may ask. The reason is
1053 * that pa_sink_update_rate() tries to update the monitor
1054 * source rate, but we are already in the process of updating
1055 * the monitor source rate, so there's a risk of entering an
1056 * infinite loop. Setting the source rate before calling
1057 * pa_sink_update_rate() makes the rate == s->sample_spec.rate
1058 * check in the beginning of this function return early, so we
1060 s->sample_spec.rate = old_rate;
1068 pa_source_output *o;
/* Rate changed: let corked outputs resync their resamplers. */
1070 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1071 if (o->state == PA_SOURCE_OUTPUT_CORKED)
1072 pa_source_output_update_rate(o);
1075 pa_log_info("Changed sampling rate successfully");
1078 pa_source_suspend(s, false, PA_SUSPEND_INTERNAL);
1083 /* Called from main thread */
/* Query the source's current latency by sending GET_LATENCY to the IO
 * thread, then add the configured latency offset. Suspended sources and
 * sources without PA_SOURCE_LATENCY take early-return paths (elided). */
1084 pa_usec_t pa_source_get_latency(pa_source *s) {
1087 pa_source_assert_ref(s);
1088 pa_assert_ctl_context();
1089 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1091 if (s->state == PA_SOURCE_SUSPENDED)
1094 if (!(s->flags & PA_SOURCE_LATENCY))
1097 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1099 /* usec is unsigned, so check that the offset can be added to usec without
1101 if (-s->latency_offset <= (int64_t) usec)
1102 usec += s->latency_offset;
1109 /* Called from IO thread */
/* IO-thread variant of pa_source_get_latency(): dispatches GET_LATENCY
 * directly through process_msg() instead of the async queue, then adds
 * the thread-side latency offset. */
1110 pa_usec_t pa_source_get_latency_within_thread(pa_source *s) {
1114 pa_source_assert_ref(s);
1115 pa_source_assert_io_context(s);
1116 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
1118 /* The returned value is supposed to be in the time domain of the sound card! */
1120 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
1123 if (!(s->flags & PA_SOURCE_LATENCY))
1126 o = PA_MSGOBJECT(s);
1128 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1130 if (o->process_msg(o, PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1133 /* usec is unsigned, so check that the offset can be added to usec without
1135 if (-s->thread_info.latency_offset <= (int64_t) usec)
1136 usec += s->thread_info.latency_offset;
1143 /* Called from the main thread (and also from the IO thread while the main
1144 * thread is waiting).
1146 * When a source uses volume sharing, it never has the PA_SOURCE_FLAT_VOLUME flag
1147 * set. Instead, flat volume mode is detected by checking whether the root source
1148 * has the flag set. */
/* Returns whether flat volume is effectively in force for this source,
 * resolving volume-sharing filters to their root source first. */
1149 bool pa_source_flat_volume_enabled(pa_source *s) {
1150 pa_source_assert_ref(s);
1152 s = pa_source_get_master(s);
1155 return (s->flags & PA_SOURCE_FLAT_VOLUME);
1160 /* Called from the main thread (and also from the IO thread while the main
1161 * thread is waiting). */
/* Walk the volume-sharing chain upwards and return the root (master)
 * source. A broken link (no output_from_master) ends the walk early;
 * the elided branch presumably yields NULL in that case. */
1162 pa_source *pa_source_get_master(pa_source *s) {
1163 pa_source_assert_ref(s);
1165 while (s && (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1166 if (PA_UNLIKELY(!s->output_from_master))
1169 s = s->output_from_master->source;
/* Called from main context */
/* A source is a filter iff it is fed by a source output on a master source. */
bool pa_source_is_filter(pa_source *s) {
    pa_source_assert_ref(s);

    return (s->output_from_master != NULL);
/* Called from main context */
/* True when this source is the monitor of a sink that is currently in
 * passthrough mode. */
bool pa_source_is_passthrough(pa_source *s) {
    pa_source_assert_ref(s);

    /* NB Currently only monitor sources support passthrough mode */
    return (s->monitor_of && pa_sink_is_passthrough(s->monitor_of));
/* Called from main context */
/* Saves the current volume and forces the source volume to (at most) 0 dB
 * so passthrough data is not attenuated. NOTE(review): the local
 * 'pa_cvolume volume' declaration is elided from this view. */
void pa_source_enter_passthrough(pa_source *s) {
    /* set the volume to NORM */
    s->saved_volume = *pa_source_get_volume(s, true);
    s->saved_save_volume = s->save_volume;

    /* Clamp to base_volume so hardware amplification is not engaged. */
    pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
    pa_source_set_volume(s, &volume, true, false);
/* Called from main context */
/* Restores the volume state saved by pa_source_enter_passthrough(). */
void pa_source_leave_passthrough(pa_source *s) {
    /* Restore source volume to what it was before we entered passthrough mode */
    pa_source_set_volume(s, &s->saved_volume, true, s->saved_save_volume);

    /* Invalidate the saved state so it cannot be restored twice. */
    pa_cvolume_init(&s->saved_volume);
    s->saved_save_volume = false;
/* Called from main context. */
/* Recomputes one output's reference ratio from the source's reference
 * volume. NOTE(review): local declarations ('c', 'ratio') and the loop's
 * continue statements are elided from this view. */
static void compute_reference_ratio(pa_source_output *o) {
    pa_cvolume remapped;

    pa_assert(pa_source_flat_volume_enabled(o->source));

    /*
     * Calculates the reference ratio from the source's reference
     * volume. This basically calculates:
     *
     * o->reference_ratio = o->volume / o->source->reference_volume
     */

    /* Remap the source's reference volume into the output's channel map. */
    remapped = o->source->reference_volume;
    pa_cvolume_remap(&remapped, &o->source->channel_map, &o->channel_map);

    ratio = o->reference_ratio;

    for (c = 0; c < o->sample_spec.channels; c++) {

        /* We don't update when the source volume is 0 anyway */
        if (remapped.values[c] <= PA_VOLUME_MUTED)

        /* Don't update the reference ratio unless necessary */
        if (pa_sw_volume_multiply(
                remapped.values[c]) == o->volume.values[c])

        ratio.values[c] = pa_sw_volume_divide(
                o->volume.values[c],
                remapped.values[c]);

    pa_source_output_set_reference_ratio(o, &ratio);
/* Called from main context. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* Recomputes the reference ratio for every output of this source, and
 * recurses into filter sources that share volume with their master. */
static void compute_reference_ratios(pa_source *s) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        compute_reference_ratio(o);

        /* Descend into volume-sharing filter sources fed by this output. */
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
            compute_reference_ratios(o->destination_source);
/* Called from main context. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* Recomputes each output's real ratio and soft volume from the source's
 * real volume. NOTE(review): local declarations ('c'), 'continue'
 * statements and several closing braces are elided from this view. */
static void compute_real_ratios(pa_source *s) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume remapped;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin source uses volume sharing, so this input's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result i->soft_volume must equal i->volume_factor. */
            pa_cvolume_reset(&o->real_ratio, o->real_ratio.channels);
            o->soft_volume = o->volume_factor;

            compute_real_ratios(o->destination_source);

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &o->channel_map);

        o->real_ratio.channels = o->sample_spec.channels;
        o->soft_volume.channels = o->sample_spec.channels;

        for (c = 0; c < o->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave o->real_ratio untouched */
                o->soft_volume.values[c] = PA_VOLUME_MUTED;

            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                    o->real_ratio.values[c],
                    remapped.values[c]) != o->volume.values[c])

                o->real_ratio.values[c] = pa_sw_volume_divide(
                        o->volume.values[c],
                        remapped.values[c]);

            o->soft_volume.values[c] = pa_sw_volume_multiply(
                    o->real_ratio.values[c],
                    o->volume_factor.values[c]);

    /* We don't copy the soft_volume to the thread_info data
     * here. That must be done by the caller */
/* Remaps volume 'v' from channel map 'from' to 'to' while minimizing the
 * impact on other streams (see the long comment below). NOTE(review): the
 * first parameter ('v'), the local 't' declaration, some asserts and the
 * return statements are elided from this view. */
static pa_cvolume *cvolume_remap_minimal_impact(
        const pa_cvolume *template,
        const pa_channel_map *from,
        const pa_channel_map *to) {

    pa_assert(template);
    pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
    pa_assert(pa_cvolume_compatible_with_channel_map(template, to));

    /* Much like pa_cvolume_remap(), but tries to minimize impact when
     * mapping from source output to source volumes:
     *
     * If template is a possible remapping from v it is used instead
     * of remapping anew.
     *
     * If the channel maps don't match we set an all-channel volume on
     * the source to ensure that changing a volume on one stream has no
     * effect that cannot be compensated for in another stream that
     * does not have the same channel map as the source. */

    /* Identical maps: nothing to do — body elided in this view. */
    if (pa_channel_map_equal(from, to))

    /* The template round-trips back to v: use it as-is. */
    if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {

    /* Fall back to a uniform all-channel volume at the stream's maximum. */
    pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* Merges the per-output volumes (remapped into 'channel_map') into
 * *max_volume, descending into volume-sharing filter sources.
 * NOTE(review): the 'continue' after the recursive branch is elided from
 * this view. */
static void get_maximum_output_volume(pa_source *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert(max_volume);
    pa_assert(channel_map);
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume remapped;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            get_maximum_output_volume(o->destination_source, max_volume, channel_map);

            /* Ignore this output. The origin source uses volume sharing, so this
             * output's volume will be set to be equal to the root source's real
             * volume. Obviously this output's current volume must not then
             * affect what the root source's real volume will be. */

        remapped = o->volume;
        cvolume_remap_minimal_impact(&remapped, max_volume, &o->channel_map, channel_map);
        pa_cvolume_merge(max_volume, max_volume, &remapped);
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* True if this source (or any volume-sharing filter source beneath it) has
 * at least one "real" output. NOTE(review): the 'return true' body and the
 * final 'return false' are elided from this view. */
static bool has_outputs(pa_source *s) {
    pa_source_output *o;

    pa_source_assert_ref(s);

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        /* A non-sharing output counts directly; a sharing one counts only
         * if its filter source itself has outputs. */
        if (!o->destination_source || !(o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || has_outputs(o->destination_source))
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* Stores *new_volume (remapped into this source's channel map) as the real
 * volume, and propagates it to volume-sharing filter sources. */
static void update_real_volume(pa_source *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);

    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_source_flat_volume_enabled(s)) {
                pa_cvolume new_output_volume;

                /* Follow the root source's real volume. */
                new_output_volume = *new_volume;
                pa_cvolume_remap(&new_output_volume, channel_map, &o->channel_map);
                pa_source_output_set_volume_direct(o, &new_output_volume);
                compute_reference_ratio(o);

            /* Recurse into the filter source with the untouched volume. */
            update_real_volume(o->destination_source, new_volume, channel_map);
/* Called from main thread. Only called for the root source in shared volume
 * cases, except for internal recursive calls (comment truncated in this view). */
/* Derives s->real_volume from the maximum of all connected stream volumes,
 * then refreshes the outputs' real ratios. NOTE(review): an early return
 * after the no-outputs branch is elided from this view. */
static void compute_real_volume(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (!has_outputs(s)) {
        /* In the special case that we have no source outputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);

    /* Start from silence and merge in every output's volume. */
    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all outputs
     * connected to this source */
    get_maximum_output_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);

    /* Then, let's update the real ratios/soft volumes of all outputs
     * connected to this source */
    compute_real_ratios(s);
/* Called from main thread. Only called for the root source in shared volume
 * cases, except for internal recursive calls. */
/* Pushes the source's reference volume down to all of its outputs.
 * NOTE(review): the 'continue' after the recursive branch is elided from
 * this view. */
static void propagate_reference_volume(pa_source *s) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    /* This is called whenever the source volume changes that is not
     * caused by a source output volume change. We need to fix up the
     * source output volumes accordingly */

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume new_volume;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            propagate_reference_volume(o->destination_source);

            /* Since the origin source uses volume sharing, this output's volume
             * needs to be updated to match the root source's real volume, but
             * that will be done later in update_shared_real_volume(). */

        /* This basically calculates:
         *
         * o->volume := o->reference_volume * o->reference_ratio */

        new_volume = s->reference_volume;
        pa_cvolume_remap(&new_volume, &s->channel_map, &o->channel_map);
        pa_sw_cvolume_multiply(&new_volume, &new_volume, &o->reference_ratio);
        pa_source_output_set_volume_direct(o, &new_volume);
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed. */
/* NOTE(review): the local 'volume' declaration/initialization, an early
 * return and the final return are elided from this view. */
static bool update_reference_volume(pa_source *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
    bool reference_volume_changed;
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));

    /* Remap the requested volume into this source's channel map. */
    pa_cvolume_remap(&volume, channel_map, &s->channel_map);

    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    pa_source_set_reference_volume_direct(s, &volume);

    /* Keep the saved flag set if the volume did not actually change. */
    s->save_volume = (!reference_volume_changed && s->save_volume) || save;

    if (!reference_volume_changed && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        /* If the root source's volume doesn't change, then there can't be any
         * changes in the other source in the source tree either.
         *
         * It's probably theoretically possible that even if the root source's
         * volume changes slightly, some filter source doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root source volume to the sources connected to the
         * intermediate source that didn't change its volume. This theoretical
         * possibility is the reason why we have that !(s->flags &
         * PA_SOURCE_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we returned here false always if
         * reference_volume_changed is false. */

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
            update_reference_volume(o->destination_source, v, channel_map, false);
/* Called from main thread */
/* Sets the source volume. When 'volume' is NULL (flat volume mode only),
 * the reference/real volumes are instead re-synchronized from the stream
 * volumes. NOTE(review): this view is elided — part of the parameter list
 * (the source pointer and the save/absolute flags), several early returns,
 * branch keywords and closing braces are not visible here. */
void pa_source_set_volume(
        const pa_cvolume *volume,

    pa_cvolume new_reference_volume, root_real_volume;
    pa_source *root_source;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_source_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* make sure we don't change the volume in PASSTHROUGH mode ...
     * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
    if (pa_source_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
        pa_log_warn("Cannot change volume, source is monitor of a PASSTHROUGH sink");

    /* In case of volume sharing, the volume is set for the root source first,
     * from which it's then propagated to the sharing sources. */
    root_source = pa_source_get_master(s);

    /* Body elided in this view — presumably an early return. */
    if (PA_UNLIKELY(!root_source))

    /* As a special exception we accept mono volumes on all sources --
     * even on those with more complex channel maps */

    if (pa_cvolume_compatible(volume, &s->sample_spec))
        new_reference_volume = *volume;

        /* Mono volume: scale the existing reference volume uniformly. */
        new_reference_volume = s->reference_volume;
        pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));

    pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);

    if (update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save)) {
        if (pa_source_flat_volume_enabled(root_source)) {
            /* OK, propagate this volume change back to the outputs */
            propagate_reference_volume(root_source);

            /* And now recalculate the real volume */
            compute_real_volume(root_source);

            update_real_volume(root_source, &root_source->reference_volume, &root_source->channel_map);

    /* If volume is NULL we synchronize the source's real and
     * reference volumes with the stream volumes. */

    pa_assert(pa_source_flat_volume_enabled(root_source));

    /* Ok, let's determine the new real volume */
    compute_real_volume(root_source);

    /* To propagate the reference volume from the filter to the root source,
     * we first take the real volume from the root source and remap it to
     * match the filter. Then, we merge in the reference volume from the
     * filter on top of this, and remap it back to the root source channel
     * map (comment continues past an elided line). */
    root_real_volume = root_source->real_volume;
    /* First we remap root's real volume to filter channel count and map if needed */
    if (s != root_source && !pa_channel_map_equal(&s->channel_map, &root_source->channel_map))
        pa_cvolume_remap(&root_real_volume, &root_source->channel_map, &s->channel_map);
    /* Then let's 'push' the reference volume if necessary */
    pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_real_volume);
    /* If the source and its root don't have the same number of channels, we need to remap back */
    if (s != root_source && !pa_channel_map_equal(&s->channel_map, &root_source->channel_map))
        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);

    update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save);

    /* Now that the reference volume is updated, we can update the streams'
     * reference ratios. */
    compute_reference_ratios(root_source);

    if (root_source->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_source->soft_volume */

        pa_cvolume_reset(&root_source->soft_volume, root_source->sample_spec.channels);
        if (!(root_source->flags & PA_SOURCE_DEFERRED_VOLUME))
            root_source->set_volume(root_source);

        /* If we have no function set_volume(), then the soft volume
         * becomes the real volume */
        root_source->soft_volume = root_source->real_volume;

    /* This tells the source that soft volume and/or real volume changed */
    pa_assert_se(pa_asyncmsgq_send(root_source->asyncmsgq, PA_MSGOBJECT(root_source), PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
/* Called from the io thread if sync volume is used, otherwise from the main thread.
 * Only to be called by source implementor */
/* Sets the software (post-hardware) volume; NULL resets it to norm.
 * NOTE(review): the if/else keywords around the reset-vs-copy pair and an
 * early return are elided from this view. */
void pa_source_set_soft_volume(pa_source *s, const pa_cvolume *volume) {
    pa_source_assert_ref(s);
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    /* Deferred-volume sources manage soft volume from the IO thread, all
     * others from the main thread. */
    if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
        pa_source_assert_io_context(s);

        pa_assert_ctl_context();

    /* NULL means "no software attenuation". */
        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);

        s->soft_volume = *volume;

    if (PA_SOURCE_IS_LINKED(s->state) && !(s->flags & PA_SOURCE_DEFERRED_VOLUME))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);

        s->thread_info.soft_volume = s->soft_volume;
/* Called from the main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* Reacts to an externally-changed hardware volume: adopts the real volume
 * as the new reference volume and rebuilds stream volumes from the
 * (unchanged) real ratios. NOTE(review): an early return and some closing
 * braces are elided from this view. */
static void propagate_real_volume(pa_source *s, const pa_cvolume *old_real_volume) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
        /* Nothing changed — body elided in this view. */
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, true);

    if (pa_source_flat_volume_enabled(s)) {
        PA_IDXSET_FOREACH(o, s->outputs, idx) {
            pa_cvolume new_volume;

            /* 2. Since the source's reference and real volumes are equal
             * now our ratios should be too. */
            pa_source_output_set_reference_ratio(o, &o->real_ratio);

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * o->volume = s->reference_volume * o->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            new_volume = s->reference_volume;
            pa_cvolume_remap(&new_volume, &s->channel_map, &o->channel_map);
            pa_sw_cvolume_multiply(&new_volume, &new_volume, &o->reference_ratio);
            pa_source_output_set_volume_direct(o, &new_volume);

            if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
                propagate_real_volume(o->destination_source, old_real_volume);

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        s->save_volume = true;
/* Called from io thread */
/* Asks the main thread (asynchronously) to refresh volume and mute state
 * from the hardware. */
void pa_source_update_volume_and_mute(pa_source *s) {
    pa_source_assert_io_context(s);

    pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
/* Called from main thread */
/* Returns the current reference volume; when 'force_refresh' is set (or
 * the source asks for refreshes), the hardware volume is re-read first.
 * NOTE(review): the if/else around the get_volume()-vs-message pair is
 * elided from this view. */
const pa_cvolume *pa_source_get_volume(pa_source *s, bool force_refresh) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_real_volume;

        pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

        old_real_volume = s->real_volume;

        /* Non-deferred volume: call the implementor hook directly;
         * otherwise the elided path sends GET_VOLUME to the IO thread. */
        if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume)

            pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        update_real_volume(s, &s->real_volume, &s->channel_map);
        propagate_real_volume(s, &old_real_volume);

    return &s->reference_volume;
/* Called from main thread. In volume sharing cases, only the root source may
 * call this (comment truncated in this view). */
/* Notification hook for source implementors: records the new real volume
 * and propagates it through the source tree and streams. */
void pa_source_volume_changed(pa_source *s, const pa_cvolume *new_real_volume) {
    pa_cvolume old_real_volume;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    /* The source implementor may call this if the volume changed to make sure everyone is notified */

    old_real_volume = s->real_volume;
    update_real_volume(s, new_real_volume, &s->channel_map);
    propagate_real_volume(s, &old_real_volume);
/* Called from main thread */
/* Sets the mute state, invokes the implementor hook, and notifies the IO
 * thread plus subscribers. NOTE(review): the 'old_muted' declaration, the
 * 's->muted = mute' assignment, the set_mute() call inside the guarded
 * section, early returns and closing braces are elided from this view. */
void pa_source_set_mute(pa_source *s, bool mute, bool save) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    old_muted = s->muted;

    if (mute == old_muted) {
        /* No change: only upgrade the save flag. */
        s->save_muted |= save;

    s->save_muted = save;

    if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->set_mute) {
        /* Guard so pa_source_mute_changed() can detect re-entrant calls
         * originating from our own set_mute() hook. */
        s->set_mute_in_progress = true;
        s->set_mute_in_progress = false;

    /* Early-out body elided in this view. */
    if (!PA_SOURCE_IS_LINKED(s->state))

    pa_log_debug("The mute of source %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_MUTE_CHANGED], s);
/* Called from main thread */
/* Returns the current mute state, re-reading it from the implementor when
 * a refresh is requested. NOTE(review): the 'mute' local declaration, the
 * else keyword and the final return are elided from this view. */
bool pa_source_get_mute(pa_source *s, bool force_refresh) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if ((s->refresh_muted || force_refresh) && s->get_mute) {

        if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
            /* Deferred volume: query via the IO thread. */
            if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
                pa_source_mute_changed(s, mute);

            /* Otherwise call the implementor hook directly. */
            if (s->get_mute(s, &mute) >= 0)
                pa_source_mute_changed(s, mute);
/* Called from main thread */
/* Notification hook for source implementors when the hardware mute state
 * changed externally. NOTE(review): the early-return bodies are elided
 * from this view. */
void pa_source_mute_changed(pa_source *s, bool new_muted) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    /* Ignore notifications triggered by our own set_mute() call. */
    if (s->set_mute_in_progress)

    /* pa_source_set_mute() does this same check, so this may appear redundant,
     * but we must have this here also, because the save parameter of
     * pa_source_set_mute() would otherwise have unintended side effects
     * (saving the mute state when it shouldn't be saved). */
    if (new_muted == s->muted)

    pa_source_set_mute(s, new_muted, true);
/* Called from main thread */
/* Merges property list 'p' into the source's proplist using 'mode' and
 * fires change notifications. NOTE(review): the final return is elided
 * from this view. */
bool pa_source_update_proplist(pa_source *s, pa_update_mode_t mode, pa_proplist *p) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    pa_proplist_update(s->proplist, mode, p);

    /* Only announce the change once the source is actually linked. */
    if (PA_SOURCE_IS_LINKED(s->state)) {
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Called from main thread */
/* FIXME -- this should be dropped and be merged into pa_source_update_proplist() */
/* Sets (or clears, when NULL) PA_PROP_DEVICE_DESCRIPTION and fires change
 * notifications. NOTE(review): the 'old' local declaration, early returns
 * and the if/else around set-vs-unset are elided from this view. */
void pa_source_set_description(pa_source *s, const char *description) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    /* Clearing an already-absent description: nothing to do. */
    if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))

    old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    /* Unchanged description: nothing to do. */
    if (old && description && pa_streq(old, description))

        pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);

        pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    if (PA_SOURCE_IS_LINKED(s->state)) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
/* Called from main thread */
/* Returns the total number of source outputs connected to this source,
 * including corked ones. */
unsigned pa_source_linked_by(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    return pa_idxset_size(s->outputs);
/* Called from main thread */
/* Returns the number of actively recording (non-corked) outputs.
 * NOTE(review): the 'ret' local declaration is elided from this view. */
unsigned pa_source_used_by(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    ret = pa_idxset_size(s->outputs);

    /* n_corked is maintained elsewhere; it can never exceed the output count. */
    pa_assert(ret >= s->n_corked);

    return ret - s->n_corked;
/* Called from main thread */
/* Counts the outputs that keep this source busy, i.e. that should inhibit
 * auto-suspend. NOTE(review): the 'ret' local, the per-branch 'continue's,
 * the increment and the final return are elided from this view. */
unsigned pa_source_check_suspend(pa_source *s) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    /* Early-out body elided in this view. */
    if (!PA_SOURCE_IS_LINKED(s->state))

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_source_output_state_t st;

        st = pa_source_output_get_state(o);

        /* We do not assert here. It is perfectly valid for a source output to
         * be in the INIT state (i.e. created, marked done but not yet put)
         * and we should not care if it's unlinked as it won't contribute
         * towards our busy status (comment continues past an elided line). */
        if (!PA_SOURCE_OUTPUT_IS_LINKED(st))

        if (st == PA_SOURCE_OUTPUT_CORKED)

        if (o->flags & PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND)
/* Called from the IO thread */
/* Copies each output's main-thread soft volume into its thread_info copy,
 * skipping outputs whose volume is already in sync. */
static void sync_output_volumes_within_thread(pa_source *s) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);

    PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
        /* Already in sync — 'continue' elided in this view. */
        if (pa_cvolume_equal(&o->thread_info.soft_volume, &o->soft_volume))

        o->thread_info.soft_volume = o->soft_volume;
        /* Sinks rewind here; sources have no equivalent, hence disabled. */
        //pa_source_output_request_rewind(o, 0, true, false, false);
/* Called from the IO thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* Applies the already-computed volumes synchronously in the IO thread and
 * recurses into volume-sharing filter sources. */
static void set_shared_volume_within_thread(pa_source *s) {
    pa_source_output *o;

    pa_source_assert_ref(s);

    /* Dispatch the synced-volume message directly — we are already in the
     * IO thread, so no message queue round trip is needed. */
    PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);

    PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
            set_shared_volume_within_thread(o->destination_source);
/* Called from IO thread, except when it is not */
/* Default message handler for pa_source. Implementors chain up to this for
 * messages they do not handle themselves. NOTE(review): this view is
 * heavily elided — many 'return 0' statements, closing braces and a few
 * statements inside cases are not visible here; comments describe only the
 * visible code. */
int pa_source_process_msg(pa_msgobject *object, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_source *s = PA_SOURCE(object);
    pa_source_assert_ref(s);

    switch ((pa_source_message_t) code) {

        case PA_SOURCE_MESSAGE_ADD_OUTPUT: {
            pa_source_output *o = PA_SOURCE_OUTPUT(userdata);

            /* The hashmap takes its own reference to the output. */
            pa_hashmap_put(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index), pa_source_output_ref(o));

            if (o->direct_on_input) {
                o->thread_info.direct_on_input = o->direct_on_input;
                pa_hashmap_put(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index), o);

            pa_assert(!o->thread_info.attached);
            o->thread_info.attached = true;

            pa_source_output_set_state_within_thread(o, o->state);

            if (o->thread_info.requested_source_latency != (pa_usec_t) -1)
                pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);

            pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);

            /* We don't just invalidate the requested latency here,
             * because if we are in a move we might need to fix up the
             * requested latency. */
            pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);

            /* In flat volume mode we need to update the volume as
             * well (comment continues past an elided line). */
            return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);

        case PA_SOURCE_MESSAGE_REMOVE_OUTPUT: {
            pa_source_output *o = PA_SOURCE_OUTPUT(userdata);

            pa_source_output_set_state_within_thread(o, o->state);

            pa_assert(o->thread_info.attached);
            o->thread_info.attached = false;

            if (o->thread_info.direct_on_input) {
                pa_hashmap_remove(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index));
                o->thread_info.direct_on_input = NULL;

            /* Drops the reference taken in ADD_OUTPUT. */
            pa_hashmap_remove_and_free(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index));
            pa_source_invalidate_requested_latency(s, true);

            /* In flat volume mode we need to update the volume as
             * well (comment continues past an elided line). */
            return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);

        case PA_SOURCE_MESSAGE_SET_SHARED_VOLUME: {
            pa_source *root_source = pa_source_get_master(s);

            if (PA_LIKELY(root_source))
                set_shared_volume_within_thread(root_source);

        case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:

            if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
                pa_source_volume_change_push(s);

            /* Fall through ... */

        case PA_SOURCE_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;

            /* Fall through ... */

        case PA_SOURCE_MESSAGE_SYNC_VOLUMES:
            sync_output_volumes_within_thread(s);

        case PA_SOURCE_MESSAGE_GET_VOLUME:

            if ((s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume) {
                /* Drain pending deferred changes before reading back. */
                pa_source_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);

            /* In case source implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;

        case PA_SOURCE_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;

            if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->set_mute)

        case PA_SOURCE_MESSAGE_GET_MUTE:

            if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->get_mute)
                return s->get_mute(s, userdata);

        case PA_SOURCE_MESSAGE_SET_STATE: {

            /* Detect transitions into and out of SUSPENDED so attached
             * outputs can be notified. */
            bool suspend_change =
                (s->thread_info.state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SOURCE_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SOURCE_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            if (suspend_change) {
                pa_source_output *o;

                while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
                    if (o->suspend_within_thread)
                        o->suspend_within_thread(o, s->thread_info.state == PA_SOURCE_SUSPENDED);

        case PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_source_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

        case PA_SOURCE_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            pa_source_set_latency_range_within_thread(s, r[0], r[1]);

        case PA_SOURCE_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

        case PA_SOURCE_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;

        case PA_SOURCE_MESSAGE_SET_FIXED_LATENCY:

            pa_source_set_fixed_latency_within_thread(s, (pa_usec_t) offset);

        case PA_SOURCE_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;

        case PA_SOURCE_MESSAGE_SET_MAX_REWIND:

            pa_source_set_max_rewind_within_thread(s, (size_t) offset);

        case PA_SOURCE_MESSAGE_GET_LATENCY:

            /* Monitor sources have no latency of their own. */
            if (s->monitor_of) {
                *((pa_usec_t*) userdata) = 0;

            /* Implementors need to overwrite this implementation! */

        case PA_SOURCE_MESSAGE_SET_PORT:

            pa_assert(userdata);

            struct source_message_set_port *msg_data = userdata;
            msg_data->ret = s->set_port(s, msg_data->port);

        case PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            /* Make sure we're not messing with main thread when no longer linked */
            if (!PA_SOURCE_IS_LINKED(s->state))

            pa_source_get_volume(s, true);
            pa_source_get_mute(s, true);

        case PA_SOURCE_MESSAGE_SET_LATENCY_OFFSET:
            s->thread_info.latency_offset = offset;

        case PA_SOURCE_MESSAGE_MAX:
2253 /* Called from main thread */
/* Suspends (or resumes) every source of the core for the given cause.
 * Monitor sources are skipped: their suspend state follows the sink they
 * monitor.  Errors from pa_source_suspend() are propagated to the caller
 * (the accumulation/return lines are on elided lines of this listing). */
2254 int pa_source_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
2259 pa_core_assert_ref(c);
2260 pa_assert_ctl_context();
/* A concrete suspend cause is mandatory; 0 would be a caller bug. */
2261 pa_assert(cause != 0);
2263 for (source = PA_SOURCE(pa_idxset_first(c->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(c->sources, &idx))) {
/* Skip monitor sources -- they are managed through their sink. */
2266 if (source->monitor_of)
2269 if ((r = pa_source_suspend(source, suspend, cause)) < 0)
2276 /* Called from IO thread */
/* Runs detach notification over all source outputs connected to this
 * source.  Only valid while the source is linked in the IO thread. */
2277 void pa_source_detach_within_thread(pa_source *s) {
2278 pa_source_output *o;
2281 pa_source_assert_ref(s);
2282 pa_source_assert_io_context(s);
2283 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
/* Loop body is on an elided line; presumably each output's detach()
 * callback is invoked here -- confirm against the full file. */
2285 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2290 /* Called from IO thread */
/* Counterpart of pa_source_detach_within_thread(): notifies all connected
 * outputs that the source is (re)attached to its IO thread. */
2291 void pa_source_attach_within_thread(pa_source *s) {
2292 pa_source_output *o;
2295 pa_source_assert_ref(s);
2296 pa_source_assert_io_context(s);
2297 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
/* Loop body is on an elided line; presumably each output's attach()
 * callback is invoked here -- confirm against the full file. */
2299 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2304 /* Called from IO thread */
/* Computes the effective requested latency of the source: the smallest
 * latency requested by any connected output, clamped to the configured
 * [min_latency, max_latency] range.  Returns (pa_usec_t) -1 when no
 * output has expressed an explicit request.  The result is cached once
 * the source is fully linked. */
2305 pa_usec_t pa_source_get_requested_latency_within_thread(pa_source *s) {
2306 pa_usec_t result = (pa_usec_t) -1;
2307 pa_source_output *o;
2310 pa_source_assert_ref(s);
2311 pa_source_assert_io_context(s);
/* Sources without dynamic latency always use the (clamped) fixed latency. */
2313 if (!(s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2314 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
/* Fast path: reuse the cached value while it is still valid. */
2316 if (s->thread_info.requested_latency_valid)
2317 return s->thread_info.requested_latency;
/* Take the minimum over all outputs that stated a preference. */
2319 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2320 if (o->thread_info.requested_source_latency != (pa_usec_t) -1 &&
2321 (result == (pa_usec_t) -1 || result > o->thread_info.requested_source_latency))
2322 result = o->thread_info.requested_source_latency;
2324 if (result != (pa_usec_t) -1)
2325 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2327 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2328 /* Only cache this if we are fully set up */
2329 s->thread_info.requested_latency = result;
2330 s->thread_info.requested_latency_valid = true;
2336 /* Called from main thread */
/* Main-thread accessor for the requested latency; fetches the value from
 * the IO thread through a synchronous message.  For suspended sources a
 * message is not sent -- the body of the SUSPENDED branch is on an elided
 * line (presumably an early return; TODO confirm against the full file). */
2337 pa_usec_t pa_source_get_requested_latency(pa_source *s) {
2340 pa_source_assert_ref(s);
2341 pa_assert_ctl_context();
2342 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2344 if (s->state == PA_SOURCE_SUSPENDED)
/* Synchronous round-trip to the IO thread; the handler also maps the
 * internal -1 "unset" marker to max_latency before we see it. */
2347 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2352 /* Called from IO thread */
/* Updates the maximum rewind size and propagates the new value to every
 * connected output (only while linked).  No-op when unchanged. */
2353 void pa_source_set_max_rewind_within_thread(pa_source *s, size_t max_rewind) {
2354 pa_source_output *o;
2357 pa_source_assert_ref(s);
2358 pa_source_assert_io_context(s);
/* Avoid redundant per-output notifications when nothing changed. */
2360 if (max_rewind == s->thread_info.max_rewind)
2363 s->thread_info.max_rewind = max_rewind;
2365 if (PA_SOURCE_IS_LINKED(s->thread_info.state))
2366 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2367 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2370 /* Called from main thread */
/* Main-thread entry point: while linked, routes the update through the IO
 * thread as an async message; the direct within-thread call below is
 * presumably the else branch for not-yet-linked sources (the `else`
 * keyword sits on an elided line -- confirm against the full file). */
2371 void pa_source_set_max_rewind(pa_source *s, size_t max_rewind) {
2372 pa_source_assert_ref(s);
2373 pa_assert_ctl_context();
2375 if (PA_SOURCE_IS_LINKED(s->state))
2376 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2378 pa_source_set_max_rewind_within_thread(s, max_rewind);
2381 /* Called from IO thread */
/* Invalidates the cached requested latency (only meaningful for
 * dynamic-latency sources) and notifies both the implementation and all
 * connected outputs so that they can recalculate.  Finally forwards the
 * invalidation to the sink this source monitors; that call is presumably
 * guarded by `if (s->monitor_of)` on an elided line -- confirm against
 * the full file. */
2382 void pa_source_invalidate_requested_latency(pa_source *s, bool dynamic) {
2383 pa_source_output *o;
2386 pa_source_assert_ref(s);
2387 pa_source_assert_io_context(s);
2389 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2390 s->thread_info.requested_latency_valid = false;
2394 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
/* Give the source implementation first chance to react. */
2396 if (s->update_requested_latency)
2397 s->update_requested_latency(s);
2399 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2400 if (o->update_source_requested_latency)
2401 o->update_source_requested_latency(o);
2405 pa_sink_invalidate_requested_latency(s->monitor_of, dynamic);
2408 /* Called from main thread */
/* Sets the dynamic latency range of the source.  0 means "no limit" for
 * either bound; out-of-range values are clamped to the compile-time
 * ABSOLUTE_MIN/MAX_LATENCY.  While linked, the change is routed to the IO
 * thread via a synchronous message (the r[2] array setup is on elided
 * lines); otherwise it is applied directly within-thread. */
2409 void pa_source_set_latency_range(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2410 pa_source_assert_ref(s);
2411 pa_assert_ctl_context();
2413 /* min_latency == 0: no limit
2414 * min_latency anything else: specified limit
2416 * Similar for max_latency */
2418 if (min_latency < ABSOLUTE_MIN_LATENCY)
2419 min_latency = ABSOLUTE_MIN_LATENCY;
2421 if (max_latency <= 0 ||
2422 max_latency > ABSOLUTE_MAX_LATENCY)
2423 max_latency = ABSOLUTE_MAX_LATENCY;
2425 pa_assert(min_latency <= max_latency);
2427 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2428 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2429 max_latency == ABSOLUTE_MAX_LATENCY) ||
2430 (s->flags & PA_SOURCE_DYNAMIC_LATENCY));
2432 if (PA_SOURCE_IS_LINKED(s->state)) {
2438 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2440 pa_source_set_latency_range_within_thread(s, min_latency, max_latency);
2443 /* Called from main thread */
/* Reads the current latency range.  While linked the values are fetched
 * from the IO thread through a synchronous message; the direct
 * thread_info reads at the bottom are presumably the else branch for
 * unlinked sources (the `else` keyword is on an elided line). */
2444 void pa_source_get_latency_range(pa_source *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2445 pa_source_assert_ref(s);
2446 pa_assert_ctl_context();
2447 pa_assert(min_latency);
2448 pa_assert(max_latency);
2450 if (PA_SOURCE_IS_LINKED(s->state)) {
2451 pa_usec_t r[2] = { 0, 0 };
2453 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2455 *min_latency = r[0];
2456 *max_latency = r[1];
2458 *min_latency = s->thread_info.min_latency;
2459 *max_latency = s->thread_info.max_latency;
2463 /* Called from IO thread, and from main thread before pa_source_put() is called */
/* IO-thread side of the latency-range update: stores the new bounds,
 * notifies all connected outputs, and invalidates the cached requested
 * latency.  Arguments are expected to be pre-clamped by the caller, hence
 * the hard asserts instead of clamping here. */
2464 void pa_source_set_latency_range_within_thread(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2465 pa_source_assert_ref(s);
2466 pa_source_assert_io_context(s);
2468 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2469 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2470 pa_assert(min_latency <= max_latency);
2472 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2473 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2474 max_latency == ABSOLUTE_MAX_LATENCY) ||
2475 (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ||
/* Early-out when the range is unchanged, avoiding needless callbacks. */
2478 if (s->thread_info.min_latency == min_latency &&
2479 s->thread_info.max_latency == max_latency)
2482 s->thread_info.min_latency = min_latency;
2483 s->thread_info.max_latency = max_latency;
2485 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2486 pa_source_output *o;
2489 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2490 if (o->update_source_latency_range)
2491 o->update_source_latency_range(o);
2494 pa_source_invalidate_requested_latency(s, false);
2497 /* Called from main thread, before the source is put */
/* Configures the fixed latency of a non-dynamic-latency source, clamping
 * to the absolute bounds.  Dynamic-latency sources must pass 0 (their
 * branch body continues on an elided line, presumably an early return).
 * While linked the value is sent to the IO thread; the direct thread_info
 * store below is presumably the else branch for unlinked sources. */
2498 void pa_source_set_fixed_latency(pa_source *s, pa_usec_t latency) {
2499 pa_source_assert_ref(s);
2500 pa_assert_ctl_context();
2502 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2503 pa_assert(latency == 0);
2507 if (latency < ABSOLUTE_MIN_LATENCY)
2508 latency = ABSOLUTE_MIN_LATENCY;
2510 if (latency > ABSOLUTE_MAX_LATENCY)
2511 latency = ABSOLUTE_MAX_LATENCY;
2513 if (PA_SOURCE_IS_LINKED(s->state))
/* The latency travels in the message's int64 offset field, not userdata. */
2514 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2516 s->thread_info.fixed_latency = latency;
2519 /* Called from main thread */
/* Returns the fixed latency of the source.  For dynamic-latency sources
 * the branch body is on an elided line (presumably returning 0).  While
 * linked the value is read from the IO thread via a synchronous message;
 * the direct thread_info read is presumably the else branch. */
2520 pa_usec_t pa_source_get_fixed_latency(pa_source *s) {
2523 pa_source_assert_ref(s);
2524 pa_assert_ctl_context();
2526 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY)
2529 if (PA_SOURCE_IS_LINKED(s->state))
2530 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2532 latency = s->thread_info.fixed_latency;
2537 /* Called from IO thread */
/* IO-thread side of the fixed-latency update: stores the value (forcing 0
 * for dynamic-latency sources), notifies all outputs, and invalidates the
 * cached requested latency.  No-op when the value is unchanged. */
2538 void pa_source_set_fixed_latency_within_thread(pa_source *s, pa_usec_t latency) {
2539 pa_source_assert_ref(s);
2540 pa_source_assert_io_context(s);
2542 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2543 pa_assert(latency == 0);
2544 s->thread_info.fixed_latency = 0;
/* Caller (main-thread wrapper) must have clamped already. */
2549 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2550 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2552 if (s->thread_info.fixed_latency == latency)
2555 s->thread_info.fixed_latency = latency;
2557 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2558 pa_source_output *o;
2561 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2562 if (o->update_source_fixed_latency)
2563 o->update_source_fixed_latency(o);
2566 pa_source_invalidate_requested_latency(s, false);
2569 /* Called from main thread */
/* Stores a signed latency offset on the main-thread copy and forwards it
 * to the IO thread while linked (via the message's int64 offset field);
 * the direct thread_info store below is presumably the else branch for
 * unlinked sources (the `else` keyword is on an elided line). */
2570 void pa_source_set_latency_offset(pa_source *s, int64_t offset) {
2571 pa_source_assert_ref(s);
2573 s->latency_offset = offset;
2575 if (PA_SOURCE_IS_LINKED(s->state))
2576 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_OFFSET, NULL, offset, NULL) == 0);
2578 s->thread_info.latency_offset = offset;
2581 /* Called from main thread */
/* Returns the current max-rewind value.  Unlinked sources are read
 * directly from thread_info (safe: no IO thread is racing yet); linked
 * sources go through a synchronous message round-trip. */
2582 size_t pa_source_get_max_rewind(pa_source *s) {
2584 pa_assert_ctl_context();
2585 pa_source_assert_ref(s);
2587 if (!PA_SOURCE_IS_LINKED(s->state))
2588 return s->thread_info.max_rewind;
2590 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2595 /* Called from main context */
/* Switches the active port of the source.  Returns 0 on success,
 * -PA_ERR_NOTIMPLEMENTED when the source has no set_port() callback,
 * -PA_ERR_NOENTITY when the port name is unknown or the implementation
 * rejects the change.  On success fires the PORT_CHANGED hook and posts a
 * change subscription event.  `save` marks whether the choice should be
 * persisted. */
2596 int pa_source_set_port(pa_source *s, const char *name, bool save) {
2597 pa_device_port *port;
2600 pa_source_assert_ref(s);
2601 pa_assert_ctl_context();
2604 pa_log_debug("set_port() operation not implemented for source %u \"%s\"", s->index, s->name);
2605 return -PA_ERR_NOTIMPLEMENTED;
2609 return -PA_ERR_NOENTITY;
2611 if (!(port = pa_hashmap_get(s->ports, name)))
2612 return -PA_ERR_NOENTITY;
/* Re-selecting the active port only upgrades the save flag. */
2614 if (s->active_port == port) {
2615 s->save_port = s->save_port || save;
/* With deferred volume the switch must happen on the IO thread, so it is
 * wrapped in a synchronous SET_PORT message; otherwise set_port() is
 * called directly from here. */
2619 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
2620 struct source_message_set_port msg = { .port = port, .ret = 0 };
2621 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
2625 ret = s->set_port(s, port);
2628 return -PA_ERR_NOENTITY;
2630 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2632 pa_log_info("Changed port of source %u \"%s\" to %s", s->index, s->name, port->name);
2634 s->active_port = port;
2635 s->save_port = save;
2637 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_CHANGED], s);
/* Lock-free free list that recycles pa_source_volume_change entries, so
 * the IO-thread volume-change hot path avoids malloc/free churn. */
2642 PA_STATIC_FLIST_DECLARE(pa_source_volume_change, 0, pa_xfree);
2644 /* Called from the IO thread. */
/* Allocates a volume-change entry, preferring the static free list over a
 * fresh pa_xnew(), and initializes its list links and hw_volume (reset to
 * PA_VOLUME_NORM for all channels of the source's sample spec). */
2645 static pa_source_volume_change *pa_source_volume_change_new(pa_source *s) {
2646 pa_source_volume_change *c;
2647 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_source_volume_change))))
2648 c = pa_xnew(pa_source_volume_change, 1);
2650 PA_LLIST_INIT(pa_source_volume_change, c);
2652 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
2656 /* Called from the IO thread. */
/* Returns an entry to the static free list; when the list is full the
 * fallback (presumably pa_xfree, on an elided line) releases it. */
2657 static void pa_source_volume_change_free(pa_source_volume_change *c) {
2659 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_source_volume_change), c) < 0)
2663 /* Called from the IO thread. */
/* Schedules a deferred hardware volume change.  The target HW volume is
 * derived as real_volume / soft_volume, the apply time is "now + current
 * source latency + extra delay", and a safety margin shifts upward
 * changes later and downward changes earlier so that glitches are not
 * audible.  The new entry is inserted in time order into the
 * volume_changes list and any already-queued changes scheduled after it
 * are dropped as superseded. */
2664 void pa_source_volume_change_push(pa_source *s) {
2665 pa_source_volume_change *c = NULL;
2666 pa_source_volume_change *nc = NULL;
2667 pa_source_volume_change *pc = NULL;
2668 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
2670 const char *direction = NULL;
2673 nc = pa_source_volume_change_new(s);
2675 /* NOTE: There is already more different volumes in pa_source that I can remember.
2676 * Adding one more volume for HW would get us rid of this, but I am trying
2677 * to survive with the ones we already have. */
2678 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
/* Nothing queued and HW volume already matches: drop the request early. */
2680 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
2681 pa_log_debug("Volume not changing")
2682 pa_source_volume_change_free(nc);
2686 nc->at = pa_source_get_latency_within_thread(s);
2687 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
/* Walk the queue from the tail to find the insertion point, applying the
 * safety margin relative to the neighboring queued change. */
2689 if (s->thread_info.volume_changes_tail) {
2690 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
2691 /* If volume is going up let's do it a bit late. If it is going
2692 * down let's do it a bit early. */
2693 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
2694 if (nc->at + safety_margin > c->at) {
2695 nc->at += safety_margin;
2700 else if (nc->at - safety_margin > c->at) {
2701 nc->at -= safety_margin;
/* Empty queue: apply the margin relative to the current HW volume. */
2709 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
2710 nc->at += safety_margin;
2713 nc->at -= safety_margin;
2716 PA_LLIST_PREPEND(pa_source_volume_change, s->thread_info.volume_changes, nc);
2719 PA_LLIST_INSERT_AFTER(pa_source_volume_change, s->thread_info.volume_changes, c, nc);
/* NOTE(review): pa_cvolume_avg() presumably returns pa_volume_t (an
 * unsigned 32-bit type); "%d" would then be the wrong specifier -- "%u"
 * looks correct.  Confirm against pulse/volume.h. */
2722 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
2724 /* We can ignore volume events that came earlier but should happen later than this. */
2725 PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
2726 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
2727 pa_source_volume_change_free(c);
2730 s->thread_info.volume_changes_tail = nc;
2733 /* Called from the IO thread. */
/* Discards every queued volume change without applying it, freeing the
 * entries back to the free list.  Head/tail are detached first so the
 * list is consistent even if freeing re-enters. */
2734 static void pa_source_volume_change_flush(pa_source *s) {
2735 pa_source_volume_change *c = s->thread_info.volume_changes;
2737 s->thread_info.volume_changes = NULL;
2738 s->thread_info.volume_changes_tail = NULL;
/* Loop header (presumably `while (c)`) is on an elided line. */
2740 pa_source_volume_change *next = c->next;
2741 pa_source_volume_change_free(c);
2746 /* Called from the IO thread. */
/* Applies every queued volume change whose deadline has passed, updating
 * current_hw_volume (the actual write_volume() call is on an elided
 * line).  When changes remain queued, *usec_to_next is set to the wait
 * until the next one; the return value presumably reports whether a
 * change remains pending.  Requires the implementation to provide
 * write_volume(). */
2747 bool pa_source_volume_change_apply(pa_source *s, pa_usec_t *usec_to_next) {
/* NOTE(review): this reads s->state from the IO thread, while the rest of
 * the IO-thread code in this file checks s->thread_info.state -- possible
 * unsynchronized read; confirm whether thread_info.state was intended. */
2753 if (!s->thread_info.volume_changes || !PA_SOURCE_IS_LINKED(s->state)) {
2759 pa_assert(s->write_volume);
2761 now = pa_rtclock_now();
2763 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
2764 pa_source_volume_change *c = s->thread_info.volume_changes;
2765 PA_LLIST_REMOVE(pa_source_volume_change, s->thread_info.volume_changes, c);
2766 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
2767 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
2769 s->thread_info.current_hw_volume = c->hw_volume;
2770 pa_source_volume_change_free(c);
2776 if (s->thread_info.volume_changes) {
2778 *usec_to_next = s->thread_info.volume_changes->at - now;
2779 if (pa_log_ratelimit(PA_LOG_DEBUG))
2780 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
/* Queue drained: clear the tail pointer too (head was emptied above). */
2785 s->thread_info.volume_changes_tail = NULL;
2790 /* Called from the main thread */
2791 /* Gets the list of formats supported by the source. The members and idxset must
2792 * be freed by the caller. */
2793 pa_idxset* pa_source_get_formats(pa_source *s) {
2798 if (s->get_formats) {
2799 /* Source supports format query, all is good */
2800 ret = s->get_formats(s);
2802 /* Source doesn't support format query, so assume it does PCM */
2803 pa_format_info *f = pa_format_info_new();
2804 f->encoding = PA_ENCODING_PCM;
/* Singleton set containing just the synthesized PCM format. */
2806 ret = pa_idxset_new(NULL, NULL);
2807 pa_idxset_put(ret, f, NULL);
2813 /* Called from the main thread */
2814 /* Checks if the source can accept this format */
/* Returns true as soon as any source-supported format is compatible with
 * f; the set obtained from pa_source_get_formats() is freed before
 * returning, honoring that function's ownership contract. */
2815 bool pa_source_check_format(pa_source *s, pa_format_info *f) {
2816 pa_idxset *formats = NULL;
2822 formats = pa_source_get_formats(s);
2825 pa_format_info *finfo_device;
2828 PA_IDXSET_FOREACH(finfo_device, formats, i) {
2829 if (pa_format_info_is_compatible(finfo_device, f)) {
2835 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
2841 /* Called from the main thread */
2842 /* Calculates the intersection between formats supported by the source and
2843 * in_formats, and returns these, in the order of the source's formats. */
/* The returned idxset holds copies of the matching in_formats entries;
 * caller owns it.  An empty/NULL in_formats short-circuits (branch body
 * elided, presumably jumping to cleanup/return). */
2844 pa_idxset* pa_source_check_formats(pa_source *s, pa_idxset *in_formats) {
2845 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *source_formats = NULL;
2846 pa_format_info *f_source, *f_in;
2851 if (!in_formats || pa_idxset_isempty(in_formats))
2854 source_formats = pa_source_get_formats(s);
/* Outer loop over source formats preserves the source's preference order
 * in the result. */
2856 PA_IDXSET_FOREACH(f_source, source_formats, i) {
2857 PA_IDXSET_FOREACH(f_in, in_formats, j) {
2858 if (pa_format_info_is_compatible(f_source, f_in))
2859 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
2865 pa_idxset_free(source_formats, (pa_free_cb_t) pa_format_info_free);
2870 /* Called from the main thread. */
2871 void pa_source_set_reference_volume_direct(pa_source *s, const pa_cvolume *volume) {
2872 pa_cvolume old_volume;
2873 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
2874 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
2879 old_volume = s->reference_volume;
2881 if (pa_cvolume_equal(volume, &old_volume))
2884 s->reference_volume = *volume;
2885 pa_log_debug("The reference volume of source %s changed from %s to %s.", s->name,
2886 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
2887 s->flags & PA_SOURCE_DECIBEL_VOLUME),
2888 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
2889 s->flags & PA_SOURCE_DECIBEL_VOLUME));
2891 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2892 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_VOLUME_CHANGED], s);