This file is part of PulseAudio.

Copyright 2004-2006 Lennart Poettering
Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

PulseAudio is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation; either version 2.1 of the License,
or (at your option) any later version.

PulseAudio is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
#include <pulse/format.h>
#include <pulse/utf8.h>
#include <pulse/xmalloc.h>
#include <pulse/timeval.h>
#include <pulse/util.h>
#include <pulse/rtclock.h>
#include <pulse/internal.h>

#include <pulsecore/core-util.h>
#include <pulsecore/source-output.h>
#include <pulsecore/namereg.h>
#include <pulsecore/core-subscribe.h>
#include <pulsecore/log.h>
#include <pulsecore/mix.h>
#include <pulsecore/flist.h>
#define ABSOLUTE_MIN_LATENCY (500)
#define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
#define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
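/* The latency bounds above are expressed in microseconds: the absolute
 * minimum is 500 usec, the absolute maximum is 10 s, and sources without
 * dynamic latency fall back to a fixed latency of 250 ms. */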
PA_DEFINE_PUBLIC_CLASS(pa_source, pa_msgobject);

struct pa_source_volume_change {
    PA_LLIST_FIELDS(pa_source_volume_change);

struct source_message_set_port {

static void source_free(pa_object *o);

static void pa_source_volume_change_push(pa_source *s);
static void pa_source_volume_change_flush(pa_source *s);
pa_source_new_data* pa_source_new_data_init(pa_source_new_data *data) {
    data->proplist = pa_proplist_new();
    data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);

void pa_source_new_data_set_name(pa_source_new_data *data, const char *name) {
    data->name = pa_xstrdup(name);

void pa_source_new_data_set_sample_spec(pa_source_new_data *data, const pa_sample_spec *spec) {
    if ((data->sample_spec_is_set = !!spec))
        data->sample_spec = *spec;

void pa_source_new_data_set_channel_map(pa_source_new_data *data, const pa_channel_map *map) {
    if ((data->channel_map_is_set = !!map))
        data->channel_map = *map;

void pa_source_new_data_set_alternate_sample_rate(pa_source_new_data *data, const uint32_t alternate_sample_rate) {
    data->alternate_sample_rate_is_set = true;
    data->alternate_sample_rate = alternate_sample_rate;

void pa_source_new_data_set_volume(pa_source_new_data *data, const pa_cvolume *volume) {
    if ((data->volume_is_set = !!volume))
        data->volume = *volume;

void pa_source_new_data_set_muted(pa_source_new_data *data, bool mute) {
    data->muted_is_set = true;

void pa_source_new_data_set_port(pa_source_new_data *data, const char *port) {
    pa_xfree(data->active_port);
    data->active_port = pa_xstrdup(port);

void pa_source_new_data_done(pa_source_new_data *data) {
    pa_proplist_free(data->proplist);
    pa_hashmap_free(data->ports);
    pa_xfree(data->name);
    pa_xfree(data->active_port);
/* Called from main context */
static void reset_callbacks(pa_source *s) {
    s->get_volume = NULL;
    s->set_volume = NULL;
    s->write_volume = NULL;
    s->update_requested_latency = NULL;
    s->get_formats = NULL;
    s->update_rate = NULL;
/* Called from main context */
pa_source* pa_source_new(
        pa_source_new_data *data,
        pa_source_flags_t flags) {

    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];

    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_source);

    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SOURCE, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);

    pa_source_new_data_set_name(data, name);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_NEW], data) < 0) {
        pa_namereg_unregister(core, name);

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    /* FIXME: There should probably be a general function for checking whether
     * the source volume is allowed to be set, like there is for source outputs. */
    pa_assert(!data->volume_is_set || !(flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    if (!data->volume_is_set) {
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);
        data->save_volume = false;

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)

    pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist, data->card);
    pa_device_init_icon(data->proplist, false);
    pa_device_init_intended_roles(data->proplist);

    if (!data->active_port) {
        pa_device_port *p = pa_device_port_find_best(data->ports);
            pa_source_new_data_set_port(data, p->name);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_FIXATE], data) < 0) {
        pa_namereg_unregister(core, name);

    s->parent.parent.free = source_free;
    s->parent.process_msg = pa_source_process_msg;

    s->state = PA_SOURCE_INIT;
    s->suspend_cause = data->suspend_cause;
    pa_source_set_mixer_dirty(s, false);
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;
    s->default_sample_rate = s->sample_spec.rate;

    if (data->alternate_sample_rate_is_set)
        s->alternate_sample_rate = data->alternate_sample_rate;
        s->alternate_sample_rate = s->core->alternate_sample_rate;

    if (s->sample_spec.rate == s->alternate_sample_rate) {
        pa_log_warn("Default and alternate sample rates are the same.");
        s->alternate_sample_rate = 0;

    s->outputs = pa_idxset_new(NULL, NULL);
    s->monitor_of = NULL;
    s->output_from_master = NULL;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = false;
    /* As a minor optimization we just steal the list instead of
     * copying it here. */
    s->ports = data->ports;
    s->active_port = NULL;
    s->save_port = false;

    if (data->active_port)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* Hopefully the active port has already been assigned in the previous call
       to pa_device_port_find_best, but better safe than sorry */
    s->active_port = pa_device_port_find_best(s->ports);

    s->latency_offset = s->active_port->latency_offset;
    s->latency_offset = 0;

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,

    s->thread_info.rtpoll = NULL;
    s->thread_info.outputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
                                                 (pa_free_cb_t) pa_source_output_unref);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.max_rewind = 0;
    s->thread_info.requested_latency_valid = false;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SOURCE_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    PA_LLIST_HEAD_INIT(pa_source_volume_change, s->thread_info.volume_changes);
    s->thread_info.volume_changes_tail = NULL;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
    s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
    s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
    s->thread_info.latency_offset = s->latency_offset;

    /* FIXME: This should probably be moved to pa_source_put() */
    pa_assert_se(pa_idxset_put(core->sources, s, &s->index) >= 0);

    pa_assert_se(pa_idxset_put(s->card->sources, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created source %u \"%s\" with sample spec %s and channel map %s\n %s",
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),

/* Called from main context */
static int source_set_state(pa_source *s, pa_source_state_t state) {
    pa_source_state_t original_state;

    pa_assert_ctl_context();

    if (s->state == state)

    original_state = s->state;

        (original_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(state)) ||
        (PA_SOURCE_IS_OPENED(original_state) && state == PA_SOURCE_SUSPENDED);

    if ((ret = s->set_state(s, state)) < 0)

    if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
        s->set_state(s, original_state);

    if (state != PA_SOURCE_UNLINKED) { /* if we enter UNLINKED state pa_source_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

    if (suspend_change) {

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(o, s->outputs, idx)
            if (s->state == PA_SOURCE_SUSPENDED &&
                (o->flags & PA_SOURCE_OUTPUT_KILL_ON_SUSPEND))
                pa_source_output_kill(o);

                o->suspend(o, state == PA_SOURCE_SUSPENDED);

void pa_source_set_get_volume_callback(pa_source *s, pa_source_cb_t cb) {

void pa_source_set_set_volume_callback(pa_source *s, pa_source_cb_t cb) {
    pa_source_flags_t flags;

    pa_assert(!s->write_volume || cb);

    /* Save the current flags so we can tell if they've changed */

    /* The source implementor is responsible for setting decibel volume support */
        s->flags |= PA_SOURCE_HW_VOLUME_CTRL;
        s->flags &= ~PA_SOURCE_HW_VOLUME_CTRL;
        /* See note below in pa_source_put() about volume sharing and decibel volumes */
        pa_source_enable_decibel_volume(s, !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    /* If the flags have changed after init, let any clients know via a change event */
    if (s->state != PA_SOURCE_INIT && flags != s->flags)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

void pa_source_set_write_volume_callback(pa_source *s, pa_source_cb_t cb) {
    pa_source_flags_t flags;

    pa_assert(!cb || s->set_volume);

    s->write_volume = cb;
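
    /* Note: a write_volume callback implies deferred ("sync") volume mode, i.e.
     * hardware volume changes are written from the IO thread, timed to match
     * the audio stream, rather than immediately from the main thread. */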
    /* Save the current flags so we can tell if they've changed */

        s->flags |= PA_SOURCE_DEFERRED_VOLUME;
        s->flags &= ~PA_SOURCE_DEFERRED_VOLUME;

    /* If the flags have changed after init, let any clients know via a change event */
    if (s->state != PA_SOURCE_INIT && flags != s->flags)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

void pa_source_set_get_mute_callback(pa_source *s, pa_source_get_mute_cb_t cb) {

void pa_source_set_set_mute_callback(pa_source *s, pa_source_cb_t cb) {
    pa_source_flags_t flags;

    /* Save the current flags so we can tell if they've changed */

        s->flags |= PA_SOURCE_HW_MUTE_CTRL;
        s->flags &= ~PA_SOURCE_HW_MUTE_CTRL;

    /* If the flags have changed after init, let any clients know via a change event */
    if (s->state != PA_SOURCE_INIT && flags != s->flags)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

static void enable_flat_volume(pa_source *s, bool enable) {
    pa_source_flags_t flags;

    /* Always follow the overall user preference here */
    enable = enable && s->core->flat_volumes;

    /* Save the current flags so we can tell if they've changed */

        s->flags |= PA_SOURCE_FLAT_VOLUME;
        s->flags &= ~PA_SOURCE_FLAT_VOLUME;

    /* If the flags have changed after init, let any clients know via a change event */
    if (s->state != PA_SOURCE_INIT && flags != s->flags)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

void pa_source_enable_decibel_volume(pa_source *s, bool enable) {
    pa_source_flags_t flags;

    /* Save the current flags so we can tell if they've changed */

        s->flags |= PA_SOURCE_DECIBEL_VOLUME;
        enable_flat_volume(s, true);
        s->flags &= ~PA_SOURCE_DECIBEL_VOLUME;
        enable_flat_volume(s, false);

    /* If the flags have changed after init, let any clients know via a change event */
    if (s->state != PA_SOURCE_INIT && flags != s->flags)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

/* Called from main context */
void pa_source_put(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SOURCE_INIT);
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || pa_source_is_filter(s));

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
    /* Generally, flags should be initialized via pa_source_new(). As a
     * special exception we allow some volume related flags to be set
     * between _new() and _put() by the callback setter functions above.
     *
     * Thus we implement a couple of safeguards here which ensure the above
     * setters were used (or at least the implementor made manual changes
     * in a compatible way).
     *
     * Note: All of these flags set here can change over the life time
     * of the source. */
    pa_assert(!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) || s->set_volume);
    pa_assert(!(s->flags & PA_SOURCE_DEFERRED_VOLUME) || s->write_volume);
    pa_assert(!(s->flags & PA_SOURCE_HW_MUTE_CTRL) || s->set_mute);

    /* XXX: Currently decibel volume is disabled for all sources that use volume
     * sharing. When the master source supports decibel volume, it would be good
     * to have the flag also in the filter source, but currently we don't do that
     * so that the flags of the filter source never change when it's moved from
     * a master source to another. One solution for this problem would be to
     * remove user-visible volume altogether from filter sources when volume
     * sharing is used, but the current approach was easier to implement... */
    /* We always support decibel volumes in software, otherwise we leave it to
     * the source implementor to set this flag as needed.
     *
     * Note: This flag can also change over the life time of the source. */
    if (!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
        pa_source_enable_decibel_volume(s, true);
        s->soft_volume = s->reference_volume;
    /* If the source implementor supports dB volumes by itself, we should always
     * try to enable flat volumes too */
    if ((s->flags & PA_SOURCE_DECIBEL_VOLUME))
        enable_flat_volume(s, true);

    if (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) {
        pa_source *root_source = pa_source_get_master(s);

        pa_assert(PA_LIKELY(root_source));

        s->reference_volume = root_source->reference_volume;
        pa_cvolume_remap(&s->reference_volume, &root_source->channel_map, &s->channel_map);

        s->real_volume = root_source->real_volume;
        pa_cvolume_remap(&s->real_volume, &root_source->channel_map, &s->channel_map);
        /* We assume that if the source implementor changed the default
         * volume he did so in real_volume, because that is the usual
         * place where he is supposed to place his changes. */
        s->reference_volume = s->real_volume;
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);

    pa_assert((s->flags & PA_SOURCE_HW_VOLUME_CTRL)
              || (s->base_volume == PA_VOLUME_NORM
                  && ((s->flags & PA_SOURCE_DECIBEL_VOLUME || (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)))));
    pa_assert(!(s->flags & PA_SOURCE_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SOURCE_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
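    /* In other words: thread_info.fixed_latency must be zero if and only if the
     * source advertises PA_SOURCE_DYNAMIC_LATENCY. */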
    if (s->suspend_cause)
        pa_assert_se(source_set_state(s, PA_SOURCE_SUSPENDED) == 0);
        pa_assert_se(source_set_state(s, PA_SOURCE_IDLE) == 0);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PUT], s);

/* Called from main context */
void pa_source_unlink(pa_source *s) {
    pa_source_output *o, PA_UNUSED *j = NULL;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    /* See pa_sink_unlink() for a couple of comments how this function

    if (s->unlink_requested)

    s->unlink_requested = true;

    linked = PA_SOURCE_IS_LINKED(s->state);

    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], s);

    if (s->state != PA_SOURCE_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sources, s, NULL);

    pa_idxset_remove_by_data(s->card->sources, s, NULL);

    while ((o = pa_idxset_first(s->outputs, NULL))) {
        pa_source_output_kill(o);

    source_set_state(s, PA_SOURCE_UNLINKED);
    s->state = PA_SOURCE_UNLINKED;

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK_POST], s);

/* Called from main context */
static void source_free(pa_object *o) {
    pa_source *s = PA_SOURCE(o);

    pa_assert_ctl_context();
    pa_assert(pa_source_refcnt(s) == 0);
    pa_assert(!PA_SOURCE_IS_LINKED(s->state));

    pa_log_info("Freeing source %u \"%s\"", s->index, s->name);

    pa_source_volume_change_flush(s);

    pa_idxset_free(s->outputs, NULL);
    pa_hashmap_free(s->thread_info.outputs);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_proplist_free(s->proplist);

    pa_hashmap_free(s->ports);

/* Called from main context, and not while the IO thread is active, please */
void pa_source_set_asyncmsgq(pa_source *s, pa_asyncmsgq *q) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

/* Called from main context, and not while the IO thread is active, please */
void pa_source_update_flags(pa_source *s, pa_source_flags_t mask, pa_source_flags_t value) {
    pa_source_flags_t old_flags;
    pa_source_output *output;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    /* For now, allow only a minimal set of flags to be changed. */
    pa_assert((mask & ~(PA_SOURCE_DYNAMIC_LATENCY|PA_SOURCE_LATENCY)) == 0);

    old_flags = s->flags;
    s->flags = (s->flags & ~mask) | (value & mask);

    if (s->flags == old_flags)

    if ((s->flags & PA_SOURCE_LATENCY) != (old_flags & PA_SOURCE_LATENCY))
        pa_log_debug("Source %s: LATENCY flag %s.", s->name, (s->flags & PA_SOURCE_LATENCY) ? "enabled" : "disabled");

    if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY) != (old_flags & PA_SOURCE_DYNAMIC_LATENCY))
        pa_log_debug("Source %s: DYNAMIC_LATENCY flag %s.",
                     s->name, (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ? "enabled" : "disabled");

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_FLAGS_CHANGED], s);

    PA_IDXSET_FOREACH(output, s->outputs, idx) {
        if (output->destination_source)
            pa_source_update_flags(output->destination_source, mask, value);

/* Called from IO context, or before _put() from main context */
void pa_source_set_rtpoll(pa_source *s, pa_rtpoll *p) {
    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);

    s->thread_info.rtpoll = p;

/* Called from main context */
int pa_source_update_status(pa_source*s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if (s->state == PA_SOURCE_SUSPENDED)

    return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);

/* Called from any context - must be threadsafe */
void pa_source_set_mixer_dirty(pa_source *s, bool is_dirty) {
    pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);

/* Called from main context */
int pa_source_suspend(pa_source *s, bool suspend, pa_suspend_cause_t cause) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (s->monitor_of && cause != PA_SUSPEND_PASSTHROUGH)
        return -PA_ERR_NOTSUPPORTED;

    s->suspend_cause |= cause;
    s->suspend_cause &= ~cause;

    if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
        /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
           it'll be handled just fine. */
        pa_source_set_mixer_dirty(s, false);
        pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
        if (s->active_port && s->set_port) {
            if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
                struct source_message_set_port msg = { .port = s->active_port, .ret = 0 };
                pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
                s->set_port(s, s->active_port);

    if ((pa_source_get_state(s) == PA_SOURCE_SUSPENDED) == !!s->suspend_cause)

    pa_log_debug("Suspend cause of source %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return source_set_state(s, PA_SOURCE_SUSPENDED);

    return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);

/* Called from main context */
int pa_source_sync_suspend(pa_source *s) {
    pa_sink_state_t state;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(s->monitor_of);

    state = pa_sink_get_state(s->monitor_of);

    if (state == PA_SINK_SUSPENDED)
        return source_set_state(s, PA_SOURCE_SUSPENDED);

    pa_assert(PA_SINK_IS_OPENED(state));

    return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);

/* Called from main context */
pa_queue *pa_source_move_all_start(pa_source *s, pa_queue *q) {
    pa_source_output *o, *n;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    for (o = PA_SOURCE_OUTPUT(pa_idxset_first(s->outputs, &idx)); o; o = n) {
        n = PA_SOURCE_OUTPUT(pa_idxset_next(s->outputs, &idx));

        pa_source_output_ref(o);

        if (pa_source_output_start_move(o) >= 0)
        pa_source_output_unref(o);

/* Called from main context */
void pa_source_move_all_finish(pa_source *s, pa_queue *q, bool save) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
        if (pa_source_output_finish_move(o, s, save) < 0)
            pa_source_output_fail_move(o);

        pa_source_output_unref(o);

    pa_queue_free(q, NULL);

/* Called from main context */
void pa_source_move_all_fail(pa_queue *q) {
    pa_assert_ctl_context();

    while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
        pa_source_output_fail_move(o);
        pa_source_output_unref(o);

    pa_queue_free(q, NULL);

/* Called from IO thread context */
void pa_source_process_rewind(pa_source *s, size_t nbytes) {
    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)

    pa_log_debug("Processing rewind...");

    PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
        pa_source_output_assert_ref(o);
        pa_source_output_process_rewind(o, nbytes);

/* Called from IO thread context */
void pa_source_post(pa_source*s, const pa_memchunk *chunk) {
    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)

    if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
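        /* Soft mute or a software volume other than 0 dB is in effect, so apply
         * it in software on a private copy of the chunk before handing the data
         * to the outputs; the caller's chunk is left untouched. */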
        pa_memchunk vchunk = *chunk;

        pa_memblock_ref(vchunk.memblock);
        pa_memchunk_make_writable(&vchunk, 0);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
            pa_silence_memchunk(&vchunk, &s->sample_spec);
            pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);

        while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
            pa_source_output_assert_ref(o);

            if (!o->thread_info.direct_on_input)
                pa_source_output_push(o, &vchunk);

        pa_memblock_unref(vchunk.memblock);

        while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
            pa_source_output_assert_ref(o);

            if (!o->thread_info.direct_on_input)
                pa_source_output_push(o, chunk);

/* Called from IO thread context */
void pa_source_post_direct(pa_source*s, pa_source_output *o, const pa_memchunk *chunk) {
    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
    pa_source_output_assert_ref(o);
    pa_assert(o->thread_info.direct_on_input);

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)

    if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
        pa_memchunk vchunk = *chunk;

        pa_memblock_ref(vchunk.memblock);
        pa_memchunk_make_writable(&vchunk, 0);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
            pa_silence_memchunk(&vchunk, &s->sample_spec);
            pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);

        pa_source_output_push(o, &vchunk);

        pa_memblock_unref(vchunk.memblock);

        pa_source_output_push(o, chunk);

/* Called from main thread */
int pa_source_update_rate(pa_source *s, uint32_t rate, bool passthrough) {
    uint32_t desired_rate = rate;
    uint32_t default_rate = s->default_sample_rate;
    uint32_t alternate_rate = s->alternate_sample_rate;
    bool default_rate_is_usable = false;
    bool alternate_rate_is_usable = false;

    if (rate == s->sample_spec.rate)

    if (!s->update_rate && !s->monitor_of)

    if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough)) {
        pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");

    if (PA_SOURCE_IS_RUNNING(s->state)) {
        pa_log_info("Cannot update rate, SOURCE_IS_RUNNING, will keep using %u Hz",
                    s->sample_spec.rate);

    if (s->monitor_of) {
        if (PA_SINK_IS_RUNNING(s->monitor_of->state)) {
            pa_log_info("Cannot update rate, this is a monitor source and the sink is running.");

    if (PA_UNLIKELY(!pa_sample_rate_valid(desired_rate)))
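
    /* Note on the checks below: a fallback rate is only considered usable if it
     * belongs to the same rate family as the requested rate, i.e. both are
     * multiples of 11025 Hz (the 44.1 kHz family) or both are multiples of
     * 4000 Hz (the 8/16/32/48/96 kHz family), presumably to keep the resampling
     * ratios simple. */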
    if (!passthrough && default_rate != desired_rate && alternate_rate != desired_rate) {
        if (default_rate % 11025 == 0 && desired_rate % 11025 == 0)
            default_rate_is_usable = true;
        if (default_rate % 4000 == 0 && desired_rate % 4000 == 0)
            default_rate_is_usable = true;
        if (alternate_rate && alternate_rate % 11025 == 0 && desired_rate % 11025 == 0)
            alternate_rate_is_usable = true;
        if (alternate_rate && alternate_rate % 4000 == 0 && desired_rate % 4000 == 0)
            alternate_rate_is_usable = true;

        if (alternate_rate_is_usable && !default_rate_is_usable)
            desired_rate = alternate_rate;
            desired_rate = default_rate;

    if (desired_rate == s->sample_spec.rate)

    if (!passthrough && pa_source_used_by(s) > 0)

    pa_log_debug("Suspending source %s due to changing the sample rate.", s->name);
    pa_source_suspend(s, true, PA_SUSPEND_INTERNAL);

    ret = s->update_rate(s, desired_rate);

    /* This is a monitor source. */
        /* XXX: This code is written with non-passthrough streams in mind. I
         * have no idea whether the behaviour with passthrough streams is
         * sensible. */
        uint32_t old_rate = s->sample_spec.rate;

        s->sample_spec.rate = desired_rate;
        ret = pa_sink_update_rate(s->monitor_of, desired_rate, false);
        /* Changing the sink rate failed, roll back the old rate for
         * the monitor source. Why did we set the source rate before
         * calling pa_sink_update_rate(), you may ask. The reason is
         * that pa_sink_update_rate() tries to update the monitor
         * source rate, but we are already in the process of updating
         * the monitor source rate, so there's a risk of entering an
         * infinite loop. Setting the source rate before calling
         * pa_sink_update_rate() makes the rate == s->sample_spec.rate
         * check in the beginning of this function return early, so we
         * avoid the loop. */
        s->sample_spec.rate = old_rate;

        pa_source_output *o;

        PA_IDXSET_FOREACH(o, s->outputs, idx) {
            if (o->state == PA_SOURCE_OUTPUT_CORKED)
                pa_source_output_update_rate(o);

        pa_log_info("Changed sampling rate successfully");

    pa_source_suspend(s, false, PA_SUSPEND_INTERNAL);

/* Called from main thread */
pa_usec_t pa_source_get_latency(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if (s->state == PA_SOURCE_SUSPENDED)

    if (!(s->flags & PA_SOURCE_LATENCY))

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
    /* usec is unsigned, so check that the offset can be added to usec without
     * underflowing. */
    if (-s->latency_offset <= (int64_t) usec)
        usec += s->latency_offset;

/* Called from IO thread */
pa_usec_t pa_source_get_latency_within_thread(pa_source *s) {
    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)

    if (!(s->flags & PA_SOURCE_LATENCY))

    o = PA_MSGOBJECT(s);

    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */

    if (o->process_msg(o, PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
    /* usec is unsigned, so check that the offset can be added to usec without
     * underflowing. */
    if (-s->thread_info.latency_offset <= (int64_t) usec)
        usec += s->thread_info.latency_offset;

/* Called from the main thread (and also from the IO thread while the main
 * thread is waiting).
 *
 * When a source uses volume sharing, it never has the PA_SOURCE_FLAT_VOLUME flag
 * set. Instead, flat volume mode is detected by checking whether the root source
 * has the flag set. */
bool pa_source_flat_volume_enabled(pa_source *s) {
    pa_source_assert_ref(s);

    s = pa_source_get_master(s);

    return (s->flags & PA_SOURCE_FLAT_VOLUME);

/* Called from the main thread (and also from the IO thread while the main
 * thread is waiting). */
pa_source *pa_source_get_master(pa_source *s) {
    pa_source_assert_ref(s);

    while (s && (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
        if (PA_UNLIKELY(!s->output_from_master))

        s = s->output_from_master->source;

/* Called from main context */
bool pa_source_is_filter(pa_source *s) {
    pa_source_assert_ref(s);

    return (s->output_from_master != NULL);

/* Called from main context */
bool pa_source_is_passthrough(pa_source *s) {
    pa_source_assert_ref(s);

    /* NB Currently only monitor sources support passthrough mode */
    return (s->monitor_of && pa_sink_is_passthrough(s->monitor_of));

/* Called from main context */
void pa_source_enter_passthrough(pa_source *s) {
    /* set the volume to NORM */
    s->saved_volume = *pa_source_get_volume(s, true);
    s->saved_save_volume = s->save_volume;

    pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
    pa_source_set_volume(s, &volume, true, false);

/* Called from main context */
void pa_source_leave_passthrough(pa_source *s) {
    /* Restore source volume to what it was before we entered passthrough mode */
    pa_source_set_volume(s, &s->saved_volume, true, s->saved_save_volume);

    pa_cvolume_init(&s->saved_volume);
    s->saved_save_volume = false;

/* Called from main context. */
static void compute_reference_ratio(pa_source_output *o) {
    pa_cvolume remapped;

    pa_assert(pa_source_flat_volume_enabled(o->source));

    /*
     * Calculates the reference ratio from the source's reference
     * volume. This basically calculates:
     *
     * o->reference_ratio = o->volume / o->source->reference_volume
     */

    remapped = o->source->reference_volume;
    pa_cvolume_remap(&remapped, &o->source->channel_map, &o->channel_map);

    ratio = o->reference_ratio;

    for (c = 0; c < o->sample_spec.channels; c++) {

        /* We don't update when the source volume is 0 anyway */
        if (remapped.values[c] <= PA_VOLUME_MUTED)

        /* Don't update the reference ratio unless necessary */
        if (pa_sw_volume_multiply(
                remapped.values[c]) == o->volume.values[c])

        ratio.values[c] = pa_sw_volume_divide(
                o->volume.values[c],
                remapped.values[c]);

    pa_source_output_set_reference_ratio(o, &ratio);

/* Called from main context. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void compute_reference_ratios(pa_source *s) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        compute_reference_ratio(o);

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
            compute_reference_ratios(o->destination_source);

/* Called from main context. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void compute_real_ratios(pa_source *s) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume remapped;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin source uses volume sharing, so this output's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result o->soft_volume must equal o->volume_factor. */
            pa_cvolume_reset(&o->real_ratio, o->real_ratio.channels);
            o->soft_volume = o->volume_factor;

            compute_real_ratios(o->destination_source);
         * This basically calculates:
         *
         * o->real_ratio := o->volume / s->real_volume
         * o->soft_volume := o->real_ratio * o->volume_factor
        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &o->channel_map);

        o->real_ratio.channels = o->sample_spec.channels;
        o->soft_volume.channels = o->sample_spec.channels;

        for (c = 0; c < o->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave o->real_ratio untouched */
                o->soft_volume.values[c] = PA_VOLUME_MUTED;

            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                    o->real_ratio.values[c],
                    remapped.values[c]) != o->volume.values[c])

                o->real_ratio.values[c] = pa_sw_volume_divide(
                        o->volume.values[c],
                        remapped.values[c]);

            o->soft_volume.values[c] = pa_sw_volume_multiply(
                    o->real_ratio.values[c],
                    o->volume_factor.values[c]);

    /* We don't copy the soft_volume to the thread_info data
     * here. That must be done by the caller */

static pa_cvolume *cvolume_remap_minimal_impact(
        const pa_cvolume *template,
        const pa_channel_map *from,
        const pa_channel_map *to) {

    pa_assert(template);

    pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
    pa_assert(pa_cvolume_compatible_with_channel_map(template, to));

    /* Much like pa_cvolume_remap(), but tries to minimize impact when
     * mapping from source output to source volumes:
     *
     * If template is a possible remapping from v it is used instead
     * of remapping anew.
     *
     * If the channel maps don't match we set an all-channel volume on
     * the source to ensure that changing a volume on one stream has no
     * effect that cannot be compensated for in another stream that
     * does not have the same channel map as the source. */

    if (pa_channel_map_equal(from, to))

    if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {

    pa_cvolume_set(v, to->channels, pa_cvolume_max(v));

/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void get_maximum_output_volume(pa_source *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert(max_volume);
    pa_assert(channel_map);
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume remapped;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            get_maximum_output_volume(o->destination_source, max_volume, channel_map);

            /* Ignore this output. The origin source uses volume sharing, so this
             * output's volume will be set to be equal to the root source's real
             * volume. Obviously this output's current volume must not then
             * affect what the root source's real volume will be. */

        remapped = o->volume;
        cvolume_remap_minimal_impact(&remapped, max_volume, &o->channel_map, channel_map);
        pa_cvolume_merge(max_volume, max_volume, &remapped);

/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static bool has_outputs(pa_source *s) {
    pa_source_output *o;

    pa_source_assert_ref(s);

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (!o->destination_source || !(o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || has_outputs(o->destination_source))

/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void update_real_volume(pa_source *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);

    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_source_flat_volume_enabled(s)) {
                pa_cvolume new_output_volume;

                /* Follow the root source's real volume. */
                new_output_volume = *new_volume;
                pa_cvolume_remap(&new_output_volume, channel_map, &o->channel_map);
                pa_source_output_set_volume_direct(o, &new_output_volume);
                compute_reference_ratio(o);

            update_real_volume(o->destination_source, new_volume, channel_map);

/* Called from main thread. Only called for the root source in shared volume
static void compute_real_volume(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (!has_outputs(s)) {
        /* In the special case that we have no source outputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);

    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all outputs
     * connected to this source */
    get_maximum_output_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);

    /* Then, let's update the real ratios/soft volumes of all outputs
     * connected to this source */
    compute_real_ratios(s);

/* Called from main thread. Only called for the root source in shared volume
 * cases, except for internal recursive calls. */
static void propagate_reference_volume(pa_source *s) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    /* This is called whenever the source volume changes that is not
     * caused by a source output volume change. We need to fix up the
     * source output volumes accordingly */

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume new_volume;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            propagate_reference_volume(o->destination_source);

            /* Since the origin source uses volume sharing, this output's volume
             * needs to be updated to match the root source's real volume, but
             * that will be done later in update_shared_real_volume(). */
        /* This basically calculates:
         * o->volume := s->reference_volume * o->reference_ratio */
        new_volume = s->reference_volume;
        pa_cvolume_remap(&new_volume, &s->channel_map, &o->channel_map);
        pa_sw_cvolume_multiply(&new_volume, &new_volume, &o->reference_ratio);
        pa_source_output_set_volume_direct(o, &new_volume);

/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed. */
static bool update_reference_volume(pa_source *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
    bool reference_volume_changed;
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));

    pa_cvolume_remap(&volume, channel_map, &s->channel_map);

    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    pa_source_set_reference_volume_direct(s, &volume);

    s->save_volume = (!reference_volume_changed && s->save_volume) || save;

    if (!reference_volume_changed && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        /* If the root source's volume doesn't change, then there can't be any
         * changes in the other sources in the source tree either.
         *
         * It's probably theoretically possible that even if the root source's
         * volume changes slightly, some filter source doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root source volume to the sources connected to the
         * intermediate source that didn't change its volume. This theoretical
         * possibility is the reason why we have that !(s->flags &
         * PA_SOURCE_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we always returned false here when
         * reference_volume_changed is false. */
    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
            update_reference_volume(o->destination_source, v, channel_map, false);

/* Called from main thread */
void pa_source_set_volume(
        const pa_cvolume *volume,

    pa_cvolume new_reference_volume, root_real_volume;
    pa_source *root_source;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_source_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* make sure we don't change the volume in PASSTHROUGH mode ...
     * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
    if (pa_source_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
        pa_log_warn("Cannot change volume, source is monitor of a PASSTHROUGH sink");

    /* In case of volume sharing, the volume is set for the root source first,
     * from which it's then propagated to the sharing sources.

    root_source = pa_source_get_master(s);

    if (PA_UNLIKELY(!root_source))

    /* As a special exception we accept mono volumes on all sources --
     * even on those with more complex channel maps */

    if (pa_cvolume_compatible(volume, &s->sample_spec))
        new_reference_volume = *volume;

        new_reference_volume = s->reference_volume;
        pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));

    pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);

    if (update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save)) {
        if (pa_source_flat_volume_enabled(root_source)) {
            /* OK, propagate this volume change back to the outputs */
            propagate_reference_volume(root_source);

            /* And now recalculate the real volume */
            compute_real_volume(root_source);

            update_real_volume(root_source, &root_source->reference_volume, &root_source->channel_map);

    /* If volume is NULL we synchronize the source's real and
     * reference volumes with the stream volumes. */

    pa_assert(pa_source_flat_volume_enabled(root_source));

    /* Ok, let's determine the new real volume */
    compute_real_volume(root_source);

    /* To propagate the reference volume from the filter to the root source,
     * we first take the real volume from the root source and remap it to
     * match the filter. Then, we merge in the reference volume from the
     * filter on top of this, and remap it back to the root source channel

    root_real_volume = root_source->real_volume;
    /* First we remap root's real volume to filter channel count and map if needed */
    if (s != root_source && !pa_channel_map_equal(&s->channel_map, &root_source->channel_map))
        pa_cvolume_remap(&root_real_volume, &root_source->channel_map, &s->channel_map);
    /* Then let's 'push' the reference volume if necessary */
    pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_real_volume);
    /* If the source and its root don't have the same number of channels, we need to remap back */
    if (s != root_source && !pa_channel_map_equal(&s->channel_map, &root_source->channel_map))
        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);

    update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save);

    /* Now that the reference volume is updated, we can update the streams'
     * reference ratios. */
    compute_reference_ratios(root_source);

    if (root_source->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_source->soft_volume */

        pa_cvolume_reset(&root_source->soft_volume, root_source->sample_spec.channels);
        if (!(root_source->flags & PA_SOURCE_DEFERRED_VOLUME))
            root_source->set_volume(root_source);

        /* If we have no function set_volume(), then the soft volume
         * becomes the real volume */
        root_source->soft_volume = root_source->real_volume;

    /* This tells the source that soft volume and/or real volume changed */
    pa_assert_se(pa_asyncmsgq_send(root_source->asyncmsgq, PA_MSGOBJECT(root_source), PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);

/* Called from the io thread if sync volume is used, otherwise from the main thread.
 * Only to be called by source implementor */
void pa_source_set_soft_volume(pa_source *s, const pa_cvolume *volume) {
    pa_source_assert_ref(s);
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
        pa_source_assert_io_context(s);
        pa_assert_ctl_context();

    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->soft_volume = *volume;

    if (PA_SOURCE_IS_LINKED(s->state) && !(s->flags & PA_SOURCE_DEFERRED_VOLUME))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
        s->thread_info.soft_volume = s->soft_volume;

/* Called from the main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void propagate_real_volume(pa_source *s, const pa_cvolume *old_real_volume) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * o->real_ratio which should stay fixed. */
    if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, true);

    if (pa_source_flat_volume_enabled(s)) {
        PA_IDXSET_FOREACH(o, s->outputs, idx) {
            pa_cvolume new_volume;

            /* 2. Since the source's reference and real volumes are equal
             * now our ratios should be too. */
            pa_source_output_set_reference_ratio(o, &o->real_ratio);
            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the source's reference volume.
1735 * This basically calculates:
1737 * o->volume = s->reference_volume * o->reference_ratio
1739 * This is identical to propagate_reference_volume() */
1740 new_volume = s->reference_volume;
1741 pa_cvolume_remap(&new_volume, &s->channel_map, &o->channel_map);
1742 pa_sw_cvolume_multiply(&new_volume, &new_volume, &o->reference_ratio);
1743 pa_source_output_set_volume_direct(o, &new_volume);
1745 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1746 propagate_real_volume(o->destination_source, old_real_volume);
1750 /* Something got changed in the hardware. It probably makes sense
1751 * to save changed hw settings given that hw volume changes not
1752 * triggered by PA are almost certainly done by the user. */
1753 if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1754 s->save_volume = true;
1757 /* Called from io thread */
1758 void pa_source_update_volume_and_mute(pa_source *s) {
1760 pa_source_assert_io_context(s);
1762 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
1765 /* Called from main thread */
1766 const pa_cvolume *pa_source_get_volume(pa_source *s, bool force_refresh) {
1767 pa_source_assert_ref(s);
1768 pa_assert_ctl_context();
1769 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1771 if (s->refresh_volume || force_refresh) {
1772 struct pa_cvolume old_real_volume;
1774 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1776 old_real_volume = s->real_volume;
1778 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume)
1781 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1783 update_real_volume(s, &s->real_volume, &s->channel_map);
1784 propagate_real_volume(s, &old_real_volume);
1787 return &s->reference_volume;
1790 /* Called from main thread. In volume sharing cases, only the root source may
1792 void pa_source_volume_changed(pa_source *s, const pa_cvolume *new_real_volume) {
1793 pa_cvolume old_real_volume;
1795 pa_source_assert_ref(s);
1796 pa_assert_ctl_context();
1797 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1798 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1800 /* The source implementor may call this if the volume changed to make sure everyone is notified */
1802 old_real_volume = s->real_volume;
1803 update_real_volume(s, new_real_volume, &s->channel_map);
1804 propagate_real_volume(s, &old_real_volume);
1807 /* Called from main thread */
1808 void pa_source_set_mute(pa_source *s, bool mute, bool save) {
1811 pa_source_assert_ref(s);
1812 pa_assert_ctl_context();
1814 old_muted = s->muted;
1816 if (mute == old_muted) {
1817 s->save_muted |= save;
1822 s->save_muted = save;
1824 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->set_mute) {
1825 s->set_mute_in_progress = true;
1827 s->set_mute_in_progress = false;
1830 if (!PA_SOURCE_IS_LINKED(s->state))
1833 pa_log_debug("The mute of source %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
1834 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1835 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1836 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_MUTE_CHANGED], s);
1839 /* Called from main thread */
1840 bool pa_source_get_mute(pa_source *s, bool force_refresh) {
1842 pa_source_assert_ref(s);
1843 pa_assert_ctl_context();
1844 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1846 if ((s->refresh_muted || force_refresh) && s->get_mute) {
1849 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1850 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
1851 pa_source_mute_changed(s, mute);
1853 if (s->get_mute(s, &mute) >= 0)
1854 pa_source_mute_changed(s, mute);
1861 /* Called from main thread */
1862 void pa_source_mute_changed(pa_source *s, bool new_muted) {
1863 pa_source_assert_ref(s);
1864 pa_assert_ctl_context();
1865 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1867 if (s->set_mute_in_progress)
1870 /* pa_source_set_mute() does this same check, so this may appear redundant,
1871 * but we must have this here also, because the save parameter of
1872 * pa_source_set_mute() would otherwise have unintended side effects
1873 * (saving the mute state when it shouldn't be saved). */
1874 if (new_muted == s->muted)
1877 pa_source_set_mute(s, new_muted, true);
1880 /* Called from main thread */
1881 bool pa_source_update_proplist(pa_source *s, pa_update_mode_t mode, pa_proplist *p) {
1882 pa_source_assert_ref(s);
1883 pa_assert_ctl_context();
1886 pa_proplist_update(s->proplist, mode, p);
1888 if (PA_SOURCE_IS_LINKED(s->state)) {
1889 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1890 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1896 /* Called from main thread */
1897 /* FIXME -- this should be dropped and be merged into pa_source_update_proplist() */
1898 void pa_source_set_description(pa_source *s, const char *description) {
1900 pa_source_assert_ref(s);
1901 pa_assert_ctl_context();
1903 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1906 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1908 if (old && description && pa_streq(old, description))
1912 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1914 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1916 if (PA_SOURCE_IS_LINKED(s->state)) {
1917 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1918 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1922 /* Called from main thread */
1923 unsigned pa_source_linked_by(pa_source *s) {
1924 pa_source_assert_ref(s);
1925 pa_assert_ctl_context();
1926 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1928 return pa_idxset_size(s->outputs);
1931 /* Called from main thread */
1932 unsigned pa_source_used_by(pa_source *s) {
1935 pa_source_assert_ref(s);
1936 pa_assert_ctl_context();
1937 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1939 ret = pa_idxset_size(s->outputs);
1940 pa_assert(ret >= s->n_corked);
1942 return ret - s->n_corked;
1945 /* Called from main thread */
1946 unsigned pa_source_check_suspend(pa_source *s) {
1948 pa_source_output *o;
1951 pa_source_assert_ref(s);
1952 pa_assert_ctl_context();
1954 if (!PA_SOURCE_IS_LINKED(s->state))
1959 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1960 pa_source_output_state_t st;
1962 st = pa_source_output_get_state(o);
1964 /* We do not assert here. It is perfectly valid for a source output to
1965 * be in the INIT state (i.e. created, marked done but not yet put)
1966 * and we should not care if it's unlinked as it won't contribute
1967 * towards our busy status. */
1969 if (!PA_SOURCE_OUTPUT_IS_LINKED(st))
1972 if (st == PA_SOURCE_OUTPUT_CORKED)
1975 if (o->flags & PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND)
1984 /* Called from the IO thread */
1985 static void sync_output_volumes_within_thread(pa_source *s) {
1986 pa_source_output *o;
1989 pa_source_assert_ref(s);
1990 pa_source_assert_io_context(s);
1992 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
1993 if (pa_cvolume_equal(&o->thread_info.soft_volume, &o->soft_volume))
1996 o->thread_info.soft_volume = o->soft_volume;
1997 //pa_source_output_request_rewind(o, 0, true, false, false);
2001 /* Called from the IO thread. Only called for the root source in volume sharing
2002 * cases, except for internal recursive calls. */
2003 static void set_shared_volume_within_thread(pa_source *s) {
2004 pa_source_output *o;
2007 pa_source_assert_ref(s);
2009 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2011 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
2012 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
2013 set_shared_volume_within_thread(o->destination_source);
2017 /* Called from IO thread, except when it is not */
2018 int pa_source_process_msg(pa_msgobject *object, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2019 pa_source *s = PA_SOURCE(object);
2020 pa_source_assert_ref(s);
2022 switch ((pa_source_message_t) code) {
2024 case PA_SOURCE_MESSAGE_ADD_OUTPUT: {
2025 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
2027 pa_hashmap_put(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index), pa_source_output_ref(o));
2029 if (o->direct_on_input) {
2030 o->thread_info.direct_on_input = o->direct_on_input;
2031 pa_hashmap_put(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index), o);
2034 pa_assert(!o->thread_info.attached);
2035 o->thread_info.attached = true;
2040 pa_source_output_set_state_within_thread(o, o->state);
2042 if (o->thread_info.requested_source_latency != (pa_usec_t) -1)
2043 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
2045 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2047 /* We don't just invalidate the requested latency here,
2048 * because if we are in a move we might need to fix up the
2049 * requested latency. */
2050 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
2052 /* In flat volume mode we need to update the volume as well. */
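/* Added note: calling object->process_msg() directly below stays within the IO
 * thread, so the shared-volume update runs synchronously as part of handling
 * ADD_OUTPUT rather than being queued as a separate message. */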
2054 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2057 case PA_SOURCE_MESSAGE_REMOVE_OUTPUT: {
2058 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
2060 pa_source_output_set_state_within_thread(o, o->state);
2065 pa_assert(o->thread_info.attached);
2066 o->thread_info.attached = false;
2068 if (o->thread_info.direct_on_input) {
2069 pa_hashmap_remove(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index));
2070 o->thread_info.direct_on_input = NULL;
2073 pa_hashmap_remove_and_free(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index));
2074 pa_source_invalidate_requested_latency(s, true);
2076 /* In flat volume mode we need to update the volume as well. */
2078 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2081 case PA_SOURCE_MESSAGE_SET_SHARED_VOLUME: {
2082 pa_source *root_source = pa_source_get_master(s);
2084 if (PA_LIKELY(root_source))
2085 set_shared_volume_within_thread(root_source);
2090 case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:
2092 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
2094 pa_source_volume_change_push(s);
2096 /* Fall through ... */
2098 case PA_SOURCE_MESSAGE_SET_VOLUME:
2100 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2101 s->thread_info.soft_volume = s->soft_volume;
2104 /* Fall through ... */
2106 case PA_SOURCE_MESSAGE_SYNC_VOLUMES:
2107 sync_output_volumes_within_thread(s);
2110 case PA_SOURCE_MESSAGE_GET_VOLUME:
2112 if ((s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume) {
s->get_volume(s);
2114 pa_source_volume_change_flush(s);
2115 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
}
2118 /* In case the source implementor reset the SW volume. */
2119 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2120 s->thread_info.soft_volume = s->soft_volume;
2125 case PA_SOURCE_MESSAGE_SET_MUTE:
2127 if (s->thread_info.soft_muted != s->muted) {
2128 s->thread_info.soft_muted = s->muted;
2131 if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->set_mute)
s->set_mute(s);
2136 case PA_SOURCE_MESSAGE_GET_MUTE:
2138 if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->get_mute)
2139 return s->get_mute(s, userdata);
2143 case PA_SOURCE_MESSAGE_SET_STATE: {
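/* Added note: suspend_change below is true only on transitions between an
 * opened state (RUNNING/IDLE) and PA_SOURCE_SUSPENDED; plain RUNNING<->IDLE
 * changes do not notify the outputs' suspend_within_thread() callbacks. */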
2145 bool suspend_change =
2146 (s->thread_info.state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2147 (PA_SOURCE_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SOURCE_SUSPENDED);
2149 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2151 if (suspend_change) {
2152 pa_source_output *o;
2155 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2156 if (o->suspend_within_thread)
2157 o->suspend_within_thread(o, s->thread_info.state == PA_SOURCE_SUSPENDED);
2163 case PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY: {
2165 pa_usec_t *usec = userdata;
2166 *usec = pa_source_get_requested_latency_within_thread(s);
2168 /* Yes, that's right, the IO thread will see -1 when no
2169 * explicit requested latency is configured, the main
2170 * thread will see max_latency */
2171 if (*usec == (pa_usec_t) -1)
2172 *usec = s->thread_info.max_latency;
2177 case PA_SOURCE_MESSAGE_SET_LATENCY_RANGE: {
2178 pa_usec_t *r = userdata;
2180 pa_source_set_latency_range_within_thread(s, r[0], r[1]);
2185 case PA_SOURCE_MESSAGE_GET_LATENCY_RANGE: {
2186 pa_usec_t *r = userdata;
2188 r[0] = s->thread_info.min_latency;
2189 r[1] = s->thread_info.max_latency;
2194 case PA_SOURCE_MESSAGE_GET_FIXED_LATENCY:
2196 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2199 case PA_SOURCE_MESSAGE_SET_FIXED_LATENCY:
2201 pa_source_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2204 case PA_SOURCE_MESSAGE_GET_MAX_REWIND:
2206 *((size_t*) userdata) = s->thread_info.max_rewind;
2209 case PA_SOURCE_MESSAGE_SET_MAX_REWIND:
2211 pa_source_set_max_rewind_within_thread(s, (size_t) offset);
2214 case PA_SOURCE_MESSAGE_GET_LATENCY:
2216 if (s->monitor_of) {
2217 *((pa_usec_t*) userdata) = 0;
return 0;
}
2221 /* Implementors need to override this implementation! */
return -1;
2224 case PA_SOURCE_MESSAGE_SET_PORT:
2226 pa_assert(userdata);
2228 struct source_message_set_port *msg_data = userdata;
2229 msg_data->ret = s->set_port(s, msg_data->port);
2233 case PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2234 /* This message is sent from the IO thread and handled in the main thread. */
2235 pa_assert_ctl_context();
2237 /* Make sure we're not messing with main thread when no longer linked */
2238 if (!PA_SOURCE_IS_LINKED(s->state))
2241 pa_source_get_volume(s, true);
2242 pa_source_get_mute(s, true);
2245 case PA_SOURCE_MESSAGE_SET_LATENCY_OFFSET:
2246 s->thread_info.latency_offset = offset;
2249 case PA_SOURCE_MESSAGE_MAX:
2256 /* Called from main thread */
2257 int pa_source_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
2262 pa_core_assert_ref(c);
2263 pa_assert_ctl_context();
2264 pa_assert(cause != 0);
2266 for (source = PA_SOURCE(pa_idxset_first(c->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(c->sources, &idx))) {
2269 if (source->monitor_of)
2272 if ((r = pa_source_suspend(source, suspend, cause)) < 0)
2279 /* Called from IO thread */
2280 void pa_source_detach_within_thread(pa_source *s) {
2281 pa_source_output *o;
2284 pa_source_assert_ref(s);
2285 pa_source_assert_io_context(s);
2286 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2288 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2293 /* Called from IO thread */
2294 void pa_source_attach_within_thread(pa_source *s) {
2295 pa_source_output *o;
2298 pa_source_assert_ref(s);
2299 pa_source_assert_io_context(s);
2300 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2302 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2307 /* Called from IO thread */
2308 pa_usec_t pa_source_get_requested_latency_within_thread(pa_source *s) {
2309 pa_usec_t result = (pa_usec_t) -1;
2310 pa_source_output *o;
2313 pa_source_assert_ref(s);
2314 pa_source_assert_io_context(s);
2316 if (!(s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2317 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2319 if (s->thread_info.requested_latency_valid)
2320 return s->thread_info.requested_latency;
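/* Added note: otherwise take the smallest (i.e. strictest) latency that any
 * linked output has requested, clamp it into the configured range, and cache
 * the result once the source is fully set up. */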
2322 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2323 if (o->thread_info.requested_source_latency != (pa_usec_t) -1 &&
2324 (result == (pa_usec_t) -1 || result > o->thread_info.requested_source_latency))
2325 result = o->thread_info.requested_source_latency;
2327 if (result != (pa_usec_t) -1)
2328 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2330 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2331 /* Only cache this if we are fully set up */
2332 s->thread_info.requested_latency = result;
2333 s->thread_info.requested_latency_valid = true;
2339 /* Called from main thread */
2340 pa_usec_t pa_source_get_requested_latency(pa_source *s) {
pa_usec_t usec = 0;
2343 pa_source_assert_ref(s);
2344 pa_assert_ctl_context();
2345 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2347 if (s->state == PA_SOURCE_SUSPENDED)
return 0;
2350 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
return usec;
2355 /* Called from IO thread */
2356 void pa_source_set_max_rewind_within_thread(pa_source *s, size_t max_rewind) {
2357 pa_source_output *o;
2360 pa_source_assert_ref(s);
2361 pa_source_assert_io_context(s);
2363 if (max_rewind == s->thread_info.max_rewind)
2366 s->thread_info.max_rewind = max_rewind;
2368 if (PA_SOURCE_IS_LINKED(s->thread_info.state))
2369 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2370 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2373 /* Called from main thread */
2374 void pa_source_set_max_rewind(pa_source *s, size_t max_rewind) {
2375 pa_source_assert_ref(s);
2376 pa_assert_ctl_context();
2378 if (PA_SOURCE_IS_LINKED(s->state))
2379 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
else
2381 pa_source_set_max_rewind_within_thread(s, max_rewind);
2384 /* Called from IO thread */
2385 void pa_source_invalidate_requested_latency(pa_source *s, bool dynamic) {
2386 pa_source_output *o;
2389 pa_source_assert_ref(s);
2390 pa_source_assert_io_context(s);
2392 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2393 s->thread_info.requested_latency_valid = false;
2397 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2399 if (s->update_requested_latency)
2400 s->update_requested_latency(s);
2402 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2403 if (o->update_source_requested_latency)
2404 o->update_source_requested_latency(o);
}
if (s->monitor_of)
2408 pa_sink_invalidate_requested_latency(s->monitor_of, dynamic);
2411 /* Called from main thread */
2412 void pa_source_set_latency_range(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2413 pa_source_assert_ref(s);
2414 pa_assert_ctl_context();
2416 /* min_latency == 0: no limit
2417 * min_latency anything else: specified limit
2419 * Similar for max_latency */
2421 if (min_latency < ABSOLUTE_MIN_LATENCY)
2422 min_latency = ABSOLUTE_MIN_LATENCY;
2424 if (max_latency <= 0 ||
2425 max_latency > ABSOLUTE_MAX_LATENCY)
2426 max_latency = ABSOLUTE_MAX_LATENCY;
2428 pa_assert(min_latency <= max_latency);
2430 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2431 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2432 max_latency == ABSOLUTE_MAX_LATENCY) ||
2433 (s->flags & PA_SOURCE_DYNAMIC_LATENCY));
2435 if (PA_SOURCE_IS_LINKED(s->state)) {
pa_usec_t r[2] = { min_latency, max_latency };
2441 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
} else
2443 pa_source_set_latency_range_within_thread(s, min_latency, max_latency);
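/* Usage sketch (added; illustrative, not from the original file): a driver with
 * dynamic latency support might announce its usable range before or after
 * pa_source_put():
 *
 *     pa_source_set_latency_range(s, 5 * PA_USEC_PER_MSEC, 2 * PA_USEC_PER_SEC);
 *
 * Passing 0 for either bound means "no limit"; values are clamped to the
 * ABSOLUTE_MIN_LATENCY/ABSOLUTE_MAX_LATENCY bounds above. */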
2446 /* Called from main thread */
2447 void pa_source_get_latency_range(pa_source *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2448 pa_source_assert_ref(s);
2449 pa_assert_ctl_context();
2450 pa_assert(min_latency);
2451 pa_assert(max_latency);
2453 if (PA_SOURCE_IS_LINKED(s->state)) {
2454 pa_usec_t r[2] = { 0, 0 };
2456 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2458 *min_latency = r[0];
2459 *max_latency = r[1];
} else {
2461 *min_latency = s->thread_info.min_latency;
2462 *max_latency = s->thread_info.max_latency;
2466 /* Called from IO thread, and from main thread before pa_source_put() is called */
2467 void pa_source_set_latency_range_within_thread(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2468 pa_source_assert_ref(s);
2469 pa_source_assert_io_context(s);
2471 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2472 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2473 pa_assert(min_latency <= max_latency);
2475 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2476 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2477 max_latency == ABSOLUTE_MAX_LATENCY) ||
2478 (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ||
s->monitor_of);
2481 if (s->thread_info.min_latency == min_latency &&
2482 s->thread_info.max_latency == max_latency)
return;
2485 s->thread_info.min_latency = min_latency;
2486 s->thread_info.max_latency = max_latency;
2488 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2489 pa_source_output *o;
2492 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2493 if (o->update_source_latency_range)
2494 o->update_source_latency_range(o);
2497 pa_source_invalidate_requested_latency(s, false);
2500 /* Called from main thread, before the source is put */
2501 void pa_source_set_fixed_latency(pa_source *s, pa_usec_t latency) {
2502 pa_source_assert_ref(s);
2503 pa_assert_ctl_context();
2505 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2506 pa_assert(latency == 0);
return;
}
2510 if (latency < ABSOLUTE_MIN_LATENCY)
2511 latency = ABSOLUTE_MIN_LATENCY;
2513 if (latency > ABSOLUTE_MAX_LATENCY)
2514 latency = ABSOLUTE_MAX_LATENCY;
2516 if (PA_SOURCE_IS_LINKED(s->state))
2517 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
else
2519 s->thread_info.fixed_latency = latency;
2522 /* Called from main thread */
2523 pa_usec_t pa_source_get_fixed_latency(pa_source *s) {
pa_usec_t latency;
2526 pa_source_assert_ref(s);
2527 pa_assert_ctl_context();
2529 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY)
return 0;
2532 if (PA_SOURCE_IS_LINKED(s->state))
2533 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
else
2535 latency = s->thread_info.fixed_latency;
return latency;
2540 /* Called from IO thread */
2541 void pa_source_set_fixed_latency_within_thread(pa_source *s, pa_usec_t latency) {
2542 pa_source_assert_ref(s);
2543 pa_source_assert_io_context(s);
2545 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2546 pa_assert(latency == 0);
2547 s->thread_info.fixed_latency = 0;
2552 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2553 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2555 if (s->thread_info.fixed_latency == latency)
2558 s->thread_info.fixed_latency = latency;
2560 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2561 pa_source_output *o;
2564 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2565 if (o->update_source_fixed_latency)
2566 o->update_source_fixed_latency(o);
2569 pa_source_invalidate_requested_latency(s, false);
2572 /* Called from main thread */
2573 void pa_source_set_latency_offset(pa_source *s, int64_t offset) {
2574 pa_source_assert_ref(s);
2576 s->latency_offset = offset;
2578 if (PA_SOURCE_IS_LINKED(s->state))
2579 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_OFFSET, NULL, offset, NULL) == 0);
else
2581 s->thread_info.latency_offset = offset;
2584 /* Called from main thread */
2585 size_t pa_source_get_max_rewind(pa_source *s) {
size_t r;
2587 pa_assert_ctl_context();
2588 pa_source_assert_ref(s);
2590 if (!PA_SOURCE_IS_LINKED(s->state))
2591 return s->thread_info.max_rewind;
2593 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
return r;
2598 /* Called from main context */
2599 int pa_source_set_port(pa_source *s, const char *name, bool save) {
2600 pa_device_port *port;
2603 pa_source_assert_ref(s);
2604 pa_assert_ctl_context();
2607 pa_log_debug("set_port() operation not implemented for source %u \"%s\"", s->index, s->name);
2608 return -PA_ERR_NOTIMPLEMENTED;
2612 return -PA_ERR_NOENTITY;
2614 if (!(port = pa_hashmap_get(s->ports, name)))
2615 return -PA_ERR_NOENTITY;
2617 if (s->active_port == port) {
2618 s->save_port = s->save_port || save;
2622 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
2623 struct source_message_set_port msg = { .port = port, .ret = 0 };
2624 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
2628 ret = s->set_port(s, port);
2631 return -PA_ERR_NOENTITY;
2633 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2635 pa_log_info("Changed port of source %u \"%s\" to %s", s->index, s->name, port->name);
2637 s->active_port = port;
2638 s->save_port = save;
2640 pa_source_set_latency_offset(s, s->active_port->latency_offset);
2642 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_CHANGED], s);
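/* Usage sketch (added; illustrative, the port name is hypothetical): a policy
 * module could switch the active port and persist the choice with
 *
 *     if (pa_source_set_port(s, "analog-input", true) < 0)
 *         pa_log_warn("Port switch failed");
 */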
2647 PA_STATIC_FLIST_DECLARE(pa_source_volume_change, 0, pa_xfree);
2649 /* Called from the IO thread. */
2650 static pa_source_volume_change *pa_source_volume_change_new(pa_source *s) {
2651 pa_source_volume_change *c;
2652 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_source_volume_change))))
2653 c = pa_xnew(pa_source_volume_change, 1);
2655 PA_LLIST_INIT(pa_source_volume_change, c);
2657 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
2661 /* Called from the IO thread. */
2662 static void pa_source_volume_change_free(pa_source_volume_change *c) {
2664 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_source_volume_change), c) < 0)
2668 /* Called from the IO thread. */
2669 void pa_source_volume_change_push(pa_source *s) {
2670 pa_source_volume_change *c = NULL;
2671 pa_source_volume_change *nc = NULL;
2672 pa_source_volume_change *pc = NULL;
2673 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
2675 const char *direction = NULL;
2678 nc = pa_source_volume_change_new(s);
2680 /* NOTE: There are already more different volumes in pa_source than I can remember.
2681 * Adding one more volume for HW would get rid of this, but I am trying
2682 * to survive with the ones we already have. */
2683 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
2685 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
2686 pa_log_debug("Volume not changing");
2687 pa_source_volume_change_free(nc);
2691 nc->at = pa_source_get_latency_within_thread(s);
2692 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
2694 if (s->thread_info.volume_changes_tail) {
2695 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
2696 /* If volume is going up let's do it a bit late. If it is going
2697 * down let's do it a bit early. */
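/* (Added note: as far as this scheduling goes, an increase is pushed
 * safety_margin usec later and a decrease pulled safety_margin usec earlier
 * relative to already-queued changes, so around each transition the hardware
 * level errs on the quieter side.) */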
2698 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
2699 if (nc->at + safety_margin > c->at) {
2700 nc->at += safety_margin;
2705 else if (nc->at - safety_margin > c->at) {
2706 nc->at -= safety_margin;
2714 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
2715 nc->at += safety_margin;
2718 nc->at -= safety_margin;
2721 PA_LLIST_PREPEND(pa_source_volume_change, s->thread_info.volume_changes, nc);
2724 PA_LLIST_INSERT_AFTER(pa_source_volume_change, s->thread_info.volume_changes, c, nc);
2727 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
2729 /* We can ignore volume events that came earlier but should happen later than this. */
2730 PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
2731 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
2732 pa_source_volume_change_free(c);
2735 s->thread_info.volume_changes_tail = nc;
2738 /* Called from the IO thread. */
2739 static void pa_source_volume_change_flush(pa_source *s) {
2740 pa_source_volume_change *c = s->thread_info.volume_changes;
2742 s->thread_info.volume_changes = NULL;
2743 s->thread_info.volume_changes_tail = NULL;
2745 pa_source_volume_change *next = c->next;
2746 pa_source_volume_change_free(c);
2751 /* Called from the IO thread. */
2752 bool pa_source_volume_change_apply(pa_source *s, pa_usec_t *usec_to_next) {
2758 if (!s->thread_info.volume_changes || !PA_SOURCE_IS_LINKED(s->state)) {
2764 pa_assert(s->write_volume);
2766 now = pa_rtclock_now();
2768 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
2769 pa_source_volume_change *c = s->thread_info.volume_changes;
2770 PA_LLIST_REMOVE(pa_source_volume_change, s->thread_info.volume_changes, c);
2771 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
2772 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
2774 s->thread_info.current_hw_volume = c->hw_volume;
2775 pa_source_volume_change_free(c);
2781 if (s->thread_info.volume_changes) {
2783 *usec_to_next = s->thread_info.volume_changes->at - now;
2784 if (pa_log_ratelimit(PA_LOG_DEBUG))
2785 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
2790 s->thread_info.volume_changes_tail = NULL;
2795 /* Called from the main thread */
2796 /* Gets the list of formats supported by the source. The members and idxset must
2797 * be freed by the caller. */
2798 pa_idxset* pa_source_get_formats(pa_source *s) {
2803 if (s->get_formats) {
2804 /* Source supports format query, all is good */
2805 ret = s->get_formats(s);
2807 /* Source doesn't support format query, so assume it does PCM */
2808 pa_format_info *f = pa_format_info_new();
2809 f->encoding = PA_ENCODING_PCM;
2811 ret = pa_idxset_new(NULL, NULL);
2812 pa_idxset_put(ret, f, NULL);
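/* Usage sketch (added; illustrative): per the contract above, callers own the
 * result:
 *
 *     pa_idxset *formats = pa_source_get_formats(s);
 *     ... inspect the pa_format_info entries ...
 *     pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
 */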
2818 /* Called from the main thread */
2819 /* Checks if the source can accept this format */
2820 bool pa_source_check_format(pa_source *s, pa_format_info *f) {
2821 pa_idxset *formats = NULL;
2827 formats = pa_source_get_formats(s);
2830 pa_format_info *finfo_device;
2833 PA_IDXSET_FOREACH(finfo_device, formats, i) {
2834 if (pa_format_info_is_compatible(finfo_device, f)) {
2840 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
2846 /* Called from the main thread */
2847 /* Calculates the intersection between formats supported by the source and
2848 * in_formats, and returns these, in the order of the source's formats. */
2849 pa_idxset* pa_source_check_formats(pa_source *s, pa_idxset *in_formats) {
2850 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *source_formats = NULL;
2851 pa_format_info *f_source, *f_in;
2856 if (!in_formats || pa_idxset_isempty(in_formats))
2859 source_formats = pa_source_get_formats(s);
2861 PA_IDXSET_FOREACH(f_source, source_formats, i) {
2862 PA_IDXSET_FOREACH(f_in, in_formats, j) {
2863 if (pa_format_info_is_compatible(f_source, f_in))
2864 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
2870 pa_idxset_free(source_formats, (pa_free_cb_t) pa_format_info_free);
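/* Added note: because the outer loop above iterates the source's own format
 * list, out_formats preserves the source's preference order, as promised in
 * the function comment. */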
2875 /* Called from the main thread. */
2876 void pa_source_set_reference_volume_direct(pa_source *s, const pa_cvolume *volume) {
2877 pa_cvolume old_volume;
2878 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
2879 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
2884 old_volume = s->reference_volume;
2886 if (pa_cvolume_equal(volume, &old_volume))
2889 s->reference_volume = *volume;
2890 pa_log_debug("The reference volume of source %s changed from %s to %s.", s->name,
2891 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
2892 s->flags & PA_SOURCE_DECIBEL_VOLUME),
2893 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
2894 s->flags & PA_SOURCE_DECIBEL_VOLUME));
2896 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2897 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_VOLUME_CHANGED], s);
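/* Added note: this direct setter only updates s->reference_volume, logs the
 * change and notifies subscribers and hooks; unlike pa_source_set_volume() it
 * performs no propagation to outputs or to the hardware, so callers are
 * presumably expected to have applied the volume elsewhere already. */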