2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
30 #include <pulse/format.h>
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
38 #include <pulsecore/core-util.h>
39 #include <pulsecore/source-output.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-subscribe.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/mix.h>
44 #include <pulsecore/flist.h>
48 #define ABSOLUTE_MIN_LATENCY (500)
49 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
50 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
52 PA_DEFINE_PUBLIC_CLASS(pa_source, pa_msgobject);
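/* A queued hardware volume change, used when PA_SOURCE_DEFERRED_VOLUME is in
 * effect: instead of applying the hardware volume immediately, changes are
 * queued and written out from the IO thread at the time they are due. */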
54 struct pa_source_volume_change {
58 PA_LLIST_FIELDS(pa_source_volume_change);
61 struct source_message_set_port {
66 static void source_free(pa_object *o);
68 static void pa_source_volume_change_push(pa_source *s);
69 static void pa_source_volume_change_flush(pa_source *s);
71 pa_source_new_data* pa_source_new_data_init(pa_source_new_data *data) {
75 data->proplist = pa_proplist_new();
76 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
81 void pa_source_new_data_set_name(pa_source_new_data *data, const char *name) {
85 data->name = pa_xstrdup(name);
88 void pa_source_new_data_set_sample_spec(pa_source_new_data *data, const pa_sample_spec *spec) {
91 if ((data->sample_spec_is_set = !!spec))
92 data->sample_spec = *spec;
95 void pa_source_new_data_set_channel_map(pa_source_new_data *data, const pa_channel_map *map) {
98 if ((data->channel_map_is_set = !!map))
99 data->channel_map = *map;
102 void pa_source_new_data_set_alternate_sample_rate(pa_source_new_data *data, const uint32_t alternate_sample_rate) {
105 data->alternate_sample_rate_is_set = true;
106 data->alternate_sample_rate = alternate_sample_rate;
109 void pa_source_new_data_set_volume(pa_source_new_data *data, const pa_cvolume *volume) {
112 if ((data->volume_is_set = !!volume))
113 data->volume = *volume;
116 void pa_source_new_data_set_muted(pa_source_new_data *data, bool mute) {
119 data->muted_is_set = true;
120 data->muted = !!mute;
123 void pa_source_new_data_set_port(pa_source_new_data *data, const char *port) {
126 pa_xfree(data->active_port);
127 data->active_port = pa_xstrdup(port);
130 void pa_source_new_data_done(pa_source_new_data *data) {
133 pa_proplist_free(data->proplist);
136 pa_hashmap_free(data->ports);
138 pa_xfree(data->name);
139 pa_xfree(data->active_port);
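/* Illustrative sketch (not part of this file): how a backend module might
 * typically drive the pa_source_new_data API above. The helper name, module
 * variable and flags below are hypothetical. */
#if 0
static pa_source *example_create_source(pa_core *core, pa_module *m) {
    pa_source_new_data data;
    pa_source *s;

    pa_source_new_data_init(&data);
    data.driver = __FILE__;
    data.module = m;
    pa_source_new_data_set_name(&data, "example_source");
    pa_source_new_data_set_sample_spec(&data, &core->default_sample_spec);
    pa_source_new_data_set_channel_map(&data, &core->default_channel_map);

    s = pa_source_new(core, &data, PA_SOURCE_LATENCY);
    pa_source_new_data_done(&data);

    /* The caller still has to set the callbacks, asyncmsgq and rtpoll, and
     * finally call pa_source_put(). */
    return s;
}
#endif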
142 /* Called from main context */
143 static void reset_callbacks(pa_source *s) {
147 s->get_volume = NULL;
148 s->set_volume = NULL;
149 s->write_volume = NULL;
152 s->update_requested_latency = NULL;
154 s->get_formats = NULL;
155 s->update_rate = NULL;
158 /* Called from main context */
159 pa_source* pa_source_new(
161 pa_source_new_data *data,
162 pa_source_flags_t flags) {
166 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
171 pa_assert(data->name);
172 pa_assert_ctl_context();
174 s = pa_msgobject_new(pa_source);
176 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SOURCE, s, data->namereg_fail))) {
177 pa_log_debug("Failed to register name %s.", data->name);
182 pa_source_new_data_set_name(data, name);
184 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_NEW], data) < 0) {
186 pa_namereg_unregister(core, name);
190 /* FIXME, need to free s here on failure */
192 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
193 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
195 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
197 if (!data->channel_map_is_set)
198 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
200 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
201 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
203 /* FIXME: There should probably be a general function for checking whether
204 * the source volume is allowed to be set, like there is for source outputs. */
205 pa_assert(!data->volume_is_set || !(flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
207 if (!data->volume_is_set) {
208 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
209 data->save_volume = false;
212 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
213 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
215 if (!data->muted_is_set)
219 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
221 pa_device_init_description(data->proplist, data->card);
222 pa_device_init_icon(data->proplist, false);
223 pa_device_init_intended_roles(data->proplist);
225 if (!data->active_port) {
226 pa_device_port *p = pa_device_port_find_best(data->ports);
228 pa_source_new_data_set_port(data, p->name);
231 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_FIXATE], data) < 0) {
233 pa_namereg_unregister(core, name);
237 s->parent.parent.free = source_free;
238 s->parent.process_msg = pa_source_process_msg;
241 s->state = PA_SOURCE_INIT;
244 s->suspend_cause = data->suspend_cause;
245 pa_source_set_mixer_dirty(s, false);
246 s->name = pa_xstrdup(name);
247 s->proplist = pa_proplist_copy(data->proplist);
248 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
249 s->module = data->module;
250 s->card = data->card;
252 s->priority = pa_device_init_priority(s->proplist);
254 s->sample_spec = data->sample_spec;
255 s->channel_map = data->channel_map;
256 s->default_sample_rate = s->sample_spec.rate;
258 if (data->alternate_sample_rate_is_set)
259 s->alternate_sample_rate = data->alternate_sample_rate;
261 s->alternate_sample_rate = s->core->alternate_sample_rate;
263 if (s->sample_spec.rate == s->alternate_sample_rate) {
264 pa_log_warn("Default and alternate sample rates are the same.");
265 s->alternate_sample_rate = 0;
268 s->outputs = pa_idxset_new(NULL, NULL);
270 s->monitor_of = NULL;
271 s->output_from_master = NULL;
273 s->reference_volume = s->real_volume = data->volume;
274 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
275 s->base_volume = PA_VOLUME_NORM;
276 s->n_volume_steps = PA_VOLUME_NORM+1;
277 s->muted = data->muted;
278 s->refresh_volume = s->refresh_muted = false;
285 /* As a minor optimization we just steal the list instead of copying it. */
287 s->ports = data->ports;
290 s->active_port = NULL;
291 s->save_port = false;
293 if (data->active_port)
294 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
295 s->save_port = data->save_port;
297 /* Hopefully the active port has already been assigned in the previous call
298 to pa_device_port_find_best, but better safe than sorry */
300 s->active_port = pa_device_port_find_best(s->ports);
303 s->latency_offset = s->active_port->latency_offset;
305 s->latency_offset = 0;
307 s->save_volume = data->save_volume;
308 s->save_muted = data->save_muted;
310 pa_silence_memchunk_get(
311 &core->silence_cache,
317 s->thread_info.rtpoll = NULL;
318 s->thread_info.outputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
319 (pa_free_cb_t) pa_source_output_unref);
320 s->thread_info.soft_volume = s->soft_volume;
321 s->thread_info.soft_muted = s->muted;
322 s->thread_info.state = s->state;
323 s->thread_info.max_rewind = 0;
324 s->thread_info.requested_latency_valid = false;
325 s->thread_info.requested_latency = 0;
326 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
327 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
328 s->thread_info.fixed_latency = flags & PA_SOURCE_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
330 PA_LLIST_HEAD_INIT(pa_source_volume_change, s->thread_info.volume_changes);
331 s->thread_info.volume_changes_tail = NULL;
332 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
333 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
334 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
335 s->thread_info.latency_offset = s->latency_offset;
337 /* FIXME: This should probably be moved to pa_source_put() */
338 pa_assert_se(pa_idxset_put(core->sources, s, &s->index) >= 0);
341 pa_assert_se(pa_idxset_put(s->card->sources, s, NULL) >= 0);
343 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
344 pa_log_info("Created source %u \"%s\" with sample spec %s and channel map %s\n %s",
347 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
348 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
355 /* Called from main context */
356 static int source_set_state(pa_source *s, pa_source_state_t state) {
359 pa_source_state_t original_state;
362 pa_assert_ctl_context();
364 if (s->state == state)
367 original_state = s->state;
370 (original_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(state)) ||
371 (PA_SOURCE_IS_OPENED(original_state) && state == PA_SOURCE_SUSPENDED);
374 if ((ret = s->set_state(s, state)) < 0)
378 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
381 s->set_state(s, original_state);
388 if (state != PA_SOURCE_UNLINKED) { /* if we enter UNLINKED state pa_source_unlink() will fire the appropriate events */
389 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_STATE_CHANGED], s);
390 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
393 if (suspend_change) {
397 /* We're suspending or resuming, tell everyone about it */
399 PA_IDXSET_FOREACH(o, s->outputs, idx)
400 if (s->state == PA_SOURCE_SUSPENDED &&
401 (o->flags & PA_SOURCE_OUTPUT_KILL_ON_SUSPEND))
402 pa_source_output_kill(o);
404 o->suspend(o, state == PA_SOURCE_SUSPENDED);
410 void pa_source_set_get_volume_callback(pa_source *s, pa_source_cb_t cb) {
416 void pa_source_set_set_volume_callback(pa_source *s, pa_source_cb_t cb) {
417 pa_source_flags_t flags;
420 pa_assert(!s->write_volume || cb);
424 /* Save the current flags so we can tell if they've changed */
428 /* The source implementor is responsible for setting decibel volume support */
429 s->flags |= PA_SOURCE_HW_VOLUME_CTRL;
431 s->flags &= ~PA_SOURCE_HW_VOLUME_CTRL;
432 /* See note below in pa_source_put() about volume sharing and decibel volumes */
433 pa_source_enable_decibel_volume(s, !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
436 /* If the flags have changed after init, let any clients know via a change event */
437 if (s->state != PA_SOURCE_INIT && flags != s->flags)
438 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
441 void pa_source_set_write_volume_callback(pa_source *s, pa_source_cb_t cb) {
442 pa_source_flags_t flags;
445 pa_assert(!cb || s->set_volume);
447 s->write_volume = cb;
449 /* Save the current flags so we can tell if they've changed */
453 s->flags |= PA_SOURCE_DEFERRED_VOLUME;
455 s->flags &= ~PA_SOURCE_DEFERRED_VOLUME;
457 /* If the flags have changed after init, let any clients know via a change event */
458 if (s->state != PA_SOURCE_INIT && flags != s->flags)
459 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
462 void pa_source_set_get_mute_callback(pa_source *s, pa_source_get_mute_cb_t cb) {
468 void pa_source_set_set_mute_callback(pa_source *s, pa_source_cb_t cb) {
469 pa_source_flags_t flags;
475 /* Save the current flags so we can tell if they've changed */
479 s->flags |= PA_SOURCE_HW_MUTE_CTRL;
481 s->flags &= ~PA_SOURCE_HW_MUTE_CTRL;
483 /* If the flags have changed after init, let any clients know via a change event */
484 if (s->state != PA_SOURCE_INIT && flags != s->flags)
485 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
488 static void enable_flat_volume(pa_source *s, bool enable) {
489 pa_source_flags_t flags;
493 /* Always follow the overall user preference here */
494 enable = enable && s->core->flat_volumes;
496 /* Save the current flags so we can tell if they've changed */
500 s->flags |= PA_SOURCE_FLAT_VOLUME;
502 s->flags &= ~PA_SOURCE_FLAT_VOLUME;
504 /* If the flags have changed after init, let any clients know via a change event */
505 if (s->state != PA_SOURCE_INIT && flags != s->flags)
506 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
509 void pa_source_enable_decibel_volume(pa_source *s, bool enable) {
510 pa_source_flags_t flags;
514 /* Save the current flags so we can tell if they've changed */
518 s->flags |= PA_SOURCE_DECIBEL_VOLUME;
519 enable_flat_volume(s, true);
521 s->flags &= ~PA_SOURCE_DECIBEL_VOLUME;
522 enable_flat_volume(s, false);
525 /* If the flags have changed after init, let any clients know via a change event */
526 if (s->state != PA_SOURCE_INIT && flags != s->flags)
527 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
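/* Illustrative sketch (hypothetical userdata and callback names): the order
 * in which a source implementor usually wires up a freshly created source
 * before linking it with pa_source_put() below. */
#if 0
    s->userdata = u;
    pa_source_set_asyncmsgq(s, u->thread_mq.inq);
    pa_source_set_rtpoll(s, u->rtpoll);
    pa_source_set_set_volume_callback(s, example_set_volume_cb); /* sets PA_SOURCE_HW_VOLUME_CTRL */
    pa_source_set_get_volume_callback(s, example_get_volume_cb);
    pa_source_set_set_mute_callback(s, example_set_mute_cb);     /* sets PA_SOURCE_HW_MUTE_CTRL */
    pa_source_set_get_mute_callback(s, example_get_mute_cb);
    /* ... start the IO thread ... */
    pa_source_put(s);
#endif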
530 /* Called from main context */
531 void pa_source_put(pa_source *s) {
532 pa_source_assert_ref(s);
533 pa_assert_ctl_context();
535 pa_assert(s->state == PA_SOURCE_INIT);
536 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || s->output_from_master);
538 /* The following fields must be initialized properly when calling _put() */
539 pa_assert(s->asyncmsgq);
540 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
542 /* Generally, flags should be initialized via pa_source_new(). As a
543 * special exception we allow some volume related flags to be set
544 * between _new() and _put() by the callback setter functions above.
546 * Thus we implement a couple safeguards here which ensure the above
547 * setters were used (or at least the implementor made manual changes
548 * in a compatible way).
550 * Note: All of these flags set here can change over the life time of the source. */
552 pa_assert(!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) || s->set_volume);
553 pa_assert(!(s->flags & PA_SOURCE_DEFERRED_VOLUME) || s->write_volume);
554 pa_assert(!(s->flags & PA_SOURCE_HW_MUTE_CTRL) || s->set_mute);
556 /* XXX: Currently decibel volume is disabled for all sources that use volume
557 * sharing. When the master source supports decibel volume, it would be good
558 * to have the flag also in the filter source, but currently we don't do that
559 * so that the flags of the filter source never change when it's moved from
560 * a master source to another. One solution for this problem would be to
561 * remove user-visible volume altogether from filter sources when volume
562 * sharing is used, but the current approach was easier to implement... */
563 /* We always support decibel volumes in software, otherwise we leave it to
564 * the source implementor to set this flag as needed.
566 * Note: This flag can also change over the life time of the source. */
567 if (!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
568 pa_source_enable_decibel_volume(s, true);
570 /* If the source implementor supports dB volumes by itself, we should always
571 * try and enable flat volumes too */
572 if ((s->flags & PA_SOURCE_DECIBEL_VOLUME))
573 enable_flat_volume(s, true);
575 if (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) {
576 pa_source *root_source = pa_source_get_master(s);
578 pa_assert(PA_LIKELY(root_source));
580 s->reference_volume = root_source->reference_volume;
581 pa_cvolume_remap(&s->reference_volume, &root_source->channel_map, &s->channel_map);
583 s->real_volume = root_source->real_volume;
584 pa_cvolume_remap(&s->real_volume, &root_source->channel_map, &s->channel_map);
586 /* We assume that if the source implementor changed the default
587 * volume he did so in real_volume, because that is the usual
588 * place where he is supposed to place his changes. */
589 s->reference_volume = s->real_volume;
591 s->thread_info.soft_volume = s->soft_volume;
592 s->thread_info.soft_muted = s->muted;
593 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
595 pa_assert((s->flags & PA_SOURCE_HW_VOLUME_CTRL)
596 || (s->base_volume == PA_VOLUME_NORM
597 && ((s->flags & PA_SOURCE_DECIBEL_VOLUME || (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)))));
598 pa_assert(!(s->flags & PA_SOURCE_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
599 pa_assert(!(s->flags & PA_SOURCE_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
601 if (s->suspend_cause)
602 pa_assert_se(source_set_state(s, PA_SOURCE_SUSPENDED) == 0);
604 pa_assert_se(source_set_state(s, PA_SOURCE_IDLE) == 0);
606 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_NEW, s->index);
607 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PUT], s);
610 /* Called from main context */
611 void pa_source_unlink(pa_source *s) {
613 pa_source_output *o, *j = NULL;
616 pa_assert_ctl_context();
618 /* See pa_sink_unlink() for a couple of comments how this function
621 linked = PA_SOURCE_IS_LINKED(s->state);
624 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], s);
626 if (s->state != PA_SOURCE_UNLINKED)
627 pa_namereg_unregister(s->core, s->name);
628 pa_idxset_remove_by_data(s->core->sources, s, NULL);
631 pa_idxset_remove_by_data(s->card->sources, s, NULL);
633 while ((o = pa_idxset_first(s->outputs, NULL))) {
635 pa_source_output_kill(o);
640 source_set_state(s, PA_SOURCE_UNLINKED);
642 s->state = PA_SOURCE_UNLINKED;
647 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
648 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK_POST], s);
652 /* Called from main context */
653 static void source_free(pa_object *o) {
654 pa_source *s = PA_SOURCE(o);
657 pa_assert_ctl_context();
658 pa_assert(pa_source_refcnt(s) == 0);
660 if (PA_SOURCE_IS_LINKED(s->state))
663 pa_log_info("Freeing source %u \"%s\"", s->index, s->name);
665 pa_idxset_free(s->outputs, NULL);
666 pa_hashmap_free(s->thread_info.outputs);
668 if (s->silence.memblock)
669 pa_memblock_unref(s->silence.memblock);
675 pa_proplist_free(s->proplist);
678 pa_hashmap_free(s->ports);
683 /* Called from main context, and not while the IO thread is active, please */
684 void pa_source_set_asyncmsgq(pa_source *s, pa_asyncmsgq *q) {
685 pa_source_assert_ref(s);
686 pa_assert_ctl_context();
691 /* Called from main context, and not while the IO thread is active, please */
692 void pa_source_update_flags(pa_source *s, pa_source_flags_t mask, pa_source_flags_t value) {
693 pa_source_flags_t old_flags;
694 pa_source_output *output;
697 pa_source_assert_ref(s);
698 pa_assert_ctl_context();
700 /* For now, allow only a minimal set of flags to be changed. */
701 pa_assert((mask & ~(PA_SOURCE_DYNAMIC_LATENCY|PA_SOURCE_LATENCY)) == 0);
703 old_flags = s->flags;
704 s->flags = (s->flags & ~mask) | (value & mask);
706 if (s->flags == old_flags)
709 if ((s->flags & PA_SOURCE_LATENCY) != (old_flags & PA_SOURCE_LATENCY))
710 pa_log_debug("Source %s: LATENCY flag %s.", s->name, (s->flags & PA_SOURCE_LATENCY) ? "enabled" : "disabled");
712 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY) != (old_flags & PA_SOURCE_DYNAMIC_LATENCY))
713 pa_log_debug("Source %s: DYNAMIC_LATENCY flag %s.",
714 s->name, (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ? "enabled" : "disabled");
716 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
717 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_FLAGS_CHANGED], s);
719 PA_IDXSET_FOREACH(output, s->outputs, idx) {
720 if (output->destination_source)
721 pa_source_update_flags(output->destination_source, mask, value);
725 /* Called from IO context, or before _put() from main context */
726 void pa_source_set_rtpoll(pa_source *s, pa_rtpoll *p) {
727 pa_source_assert_ref(s);
728 pa_source_assert_io_context(s);
730 s->thread_info.rtpoll = p;
733 /* Called from main context */
734 int pa_source_update_status(pa_source*s) {
735 pa_source_assert_ref(s);
736 pa_assert_ctl_context();
737 pa_assert(PA_SOURCE_IS_LINKED(s->state));
739 if (s->state == PA_SOURCE_SUSPENDED)
742 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
745 /* Called from any context - must be threadsafe */
746 void pa_source_set_mixer_dirty(pa_source *s, bool is_dirty) {
747 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
750 /* Called from main context */
751 int pa_source_suspend(pa_source *s, bool suspend, pa_suspend_cause_t cause) {
752 pa_source_assert_ref(s);
753 pa_assert_ctl_context();
754 pa_assert(PA_SOURCE_IS_LINKED(s->state));
755 pa_assert(cause != 0);
757 if (s->monitor_of && cause != PA_SUSPEND_PASSTHROUGH)
758 return -PA_ERR_NOTSUPPORTED;
761 s->suspend_cause |= cause;
763 s->suspend_cause &= ~cause;
765 if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
766 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
767 it'll be handled just fine. */
768 pa_source_set_mixer_dirty(s, false);
769 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
770 if (s->active_port && s->set_port) {
771 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
772 struct source_message_set_port msg = { .port = s->active_port, .ret = 0 };
773 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
776 s->set_port(s, s->active_port);
786 if ((pa_source_get_state(s) == PA_SOURCE_SUSPENDED) == !!s->suspend_cause)
789 pa_log_debug("Suspend cause of source %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
791 if (s->suspend_cause)
792 return source_set_state(s, PA_SOURCE_SUSPENDED);
794 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
797 /* Called from main context */
798 int pa_source_sync_suspend(pa_source *s) {
799 pa_sink_state_t state;
801 pa_source_assert_ref(s);
802 pa_assert_ctl_context();
803 pa_assert(PA_SOURCE_IS_LINKED(s->state));
804 pa_assert(s->monitor_of);
806 state = pa_sink_get_state(s->monitor_of);
808 if (state == PA_SINK_SUSPENDED)
809 return source_set_state(s, PA_SOURCE_SUSPENDED);
811 pa_assert(PA_SINK_IS_OPENED(state));
813 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
816 /* Called from main context */
817 pa_queue *pa_source_move_all_start(pa_source *s, pa_queue *q) {
818 pa_source_output *o, *n;
821 pa_source_assert_ref(s);
822 pa_assert_ctl_context();
823 pa_assert(PA_SOURCE_IS_LINKED(s->state));
828 for (o = PA_SOURCE_OUTPUT(pa_idxset_first(s->outputs, &idx)); o; o = n) {
829 n = PA_SOURCE_OUTPUT(pa_idxset_next(s->outputs, &idx));
831 pa_source_output_ref(o);
833 if (pa_source_output_start_move(o) >= 0)
836 pa_source_output_unref(o);
842 /* Called from main context */
843 void pa_source_move_all_finish(pa_source *s, pa_queue *q, bool save) {
846 pa_source_assert_ref(s);
847 pa_assert_ctl_context();
848 pa_assert(PA_SOURCE_IS_LINKED(s->state));
851 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
852 if (pa_source_output_finish_move(o, s, save) < 0)
853 pa_source_output_fail_move(o);
855 pa_source_output_unref(o);
858 pa_queue_free(q, NULL);
861 /* Called from main context */
862 void pa_source_move_all_fail(pa_queue *q) {
865 pa_assert_ctl_context();
868 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
869 pa_source_output_fail_move(o);
870 pa_source_output_unref(o);
873 pa_queue_free(q, NULL);
876 /* Called from IO thread context */
877 void pa_source_process_rewind(pa_source *s, size_t nbytes) {
881 pa_source_assert_ref(s);
882 pa_source_assert_io_context(s);
883 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
888 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
891 pa_log_debug("Processing rewind...");
893 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
894 pa_source_output_assert_ref(o);
895 pa_source_output_process_rewind(o, nbytes);
899 /* Called from IO thread context */
900 void pa_source_post(pa_source*s, const pa_memchunk *chunk) {
904 pa_source_assert_ref(s);
905 pa_source_assert_io_context(s);
906 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
909 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
912 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
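/* A soft volume or soft mute is in effect: apply it to a private, writable
 * copy of the chunk before pushing the data to the outputs. */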
913 pa_memchunk vchunk = *chunk;
915 pa_memblock_ref(vchunk.memblock);
916 pa_memchunk_make_writable(&vchunk, 0);
918 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
919 pa_silence_memchunk(&vchunk, &s->sample_spec);
921 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
923 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
924 pa_source_output_assert_ref(o);
926 if (!o->thread_info.direct_on_input)
927 pa_source_output_push(o, &vchunk);
930 pa_memblock_unref(vchunk.memblock);
933 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
934 pa_source_output_assert_ref(o);
936 if (!o->thread_info.direct_on_input)
937 pa_source_output_push(o, chunk);
942 /* Called from IO thread context */
943 void pa_source_post_direct(pa_source*s, pa_source_output *o, const pa_memchunk *chunk) {
944 pa_source_assert_ref(s);
945 pa_source_assert_io_context(s);
946 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
947 pa_source_output_assert_ref(o);
948 pa_assert(o->thread_info.direct_on_input);
951 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
954 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
955 pa_memchunk vchunk = *chunk;
957 pa_memblock_ref(vchunk.memblock);
958 pa_memchunk_make_writable(&vchunk, 0);
960 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
961 pa_silence_memchunk(&vchunk, &s->sample_spec);
963 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
965 pa_source_output_push(o, &vchunk);
967 pa_memblock_unref(vchunk.memblock);
969 pa_source_output_push(o, chunk);
972 /* Called from main thread */
973 int pa_source_update_rate(pa_source *s, uint32_t rate, bool passthrough) {
975 uint32_t desired_rate = rate;
976 uint32_t default_rate = s->default_sample_rate;
977 uint32_t alternate_rate = s->alternate_sample_rate;
978 bool use_alternate = false;
980 if (rate == s->sample_spec.rate)
983 if (!s->update_rate && !s->monitor_of)
986 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough)) {
987 pa_log_debug("Default and alternate sample rates are the same.");
991 if (PA_SOURCE_IS_RUNNING(s->state)) {
992 pa_log_info("Cannot update rate, SOURCE_IS_RUNNING, will keep using %u Hz",
993 s->sample_spec.rate);
998 if (PA_SINK_IS_RUNNING(s->monitor_of->state)) {
999 pa_log_info("Cannot update rate, this is a monitor source and the sink is running.");
1004 if (PA_UNLIKELY(!pa_sample_rate_valid(desired_rate)))
1008 pa_assert((default_rate % 4000 == 0) || (default_rate % 11025 == 0));
1009 pa_assert((alternate_rate % 4000 == 0) || (alternate_rate % 11025 == 0));
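/* Decide between the default and alternate rate: prefer whichever one belongs
 * to the same rate family (multiples of 4000 Hz vs. multiples of 11025 Hz) as
 * the rate that is being requested. */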
1011 if (default_rate % 11025 == 0) {
1012 if ((alternate_rate % 4000 == 0) && (desired_rate % 4000 == 0))
1015 /* default is 4000 multiple */
1016 if ((alternate_rate % 11025 == 0) && (desired_rate % 11025 == 0))
1021 desired_rate = alternate_rate;
1023 desired_rate = default_rate;
1025 desired_rate = rate; /* use stream sampling rate, discard default/alternate settings */
1028 if (desired_rate == s->sample_spec.rate)
1031 if (!passthrough && pa_source_used_by(s) > 0)
1034 pa_log_debug("Suspending source %s due to changing the sample rate.", s->name);
1035 pa_source_suspend(s, true, PA_SUSPEND_INTERNAL);
1038 ret = s->update_rate(s, desired_rate);
1040 /* This is a monitor source. */
1042 /* XXX: This code is written with non-passthrough streams in mind. I
1043 * have no idea whether the behaviour with passthrough streams is sensible. */
1046 uint32_t old_rate = s->sample_spec.rate;
1048 s->sample_spec.rate = desired_rate;
1049 ret = pa_sink_update_rate(s->monitor_of, desired_rate, false);
1052 /* Changing the sink rate failed, roll back the old rate for
1053 * the monitor source. Why did we set the source rate before
1054 * calling pa_sink_update_rate(), you may ask. The reason is
1055 * that pa_sink_update_rate() tries to update the monitor
1056 * source rate, but we are already in the process of updating
1057 * the monitor source rate, so there's a risk of entering an
1058 * infinite loop. Setting the source rate before calling
1059 * pa_sink_update_rate() makes the rate == s->sample_spec.rate
1060 * check in the beginning of this function return early, so we
1062 s->sample_spec.rate = old_rate;
1070 pa_source_output *o;
1072 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1073 if (o->state == PA_SOURCE_OUTPUT_CORKED)
1074 pa_source_output_update_rate(o);
1077 pa_log_info("Changed sampling rate successfully");
1080 pa_source_suspend(s, false, PA_SUSPEND_INTERNAL);
1085 /* Called from main thread */
1086 pa_usec_t pa_source_get_latency(pa_source *s) {
1089 pa_source_assert_ref(s);
1090 pa_assert_ctl_context();
1091 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1093 if (s->state == PA_SOURCE_SUSPENDED)
1096 if (!(s->flags & PA_SOURCE_LATENCY))
1099 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1101 /* usec is unsigned, so check that the offset can be added to usec without underflowing. */
1103 if (-s->latency_offset <= (int64_t) usec)
1104 usec += s->latency_offset;
1111 /* Called from IO thread */
1112 pa_usec_t pa_source_get_latency_within_thread(pa_source *s) {
1116 pa_source_assert_ref(s);
1117 pa_source_assert_io_context(s);
1118 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
1120 /* The returned value is supposed to be in the time domain of the sound card! */
1122 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
1125 if (!(s->flags & PA_SOURCE_LATENCY))
1128 o = PA_MSGOBJECT(s);
1130 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1132 if (o->process_msg(o, PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1135 /* usec is unsigned, so check that the offset can be added to usec without underflowing. */
1137 if (-s->thread_info.latency_offset <= (int64_t) usec)
1138 usec += s->thread_info.latency_offset;
1145 /* Called from the main thread (and also from the IO thread while the main
1146 * thread is waiting).
1148 * When a source uses volume sharing, it never has the PA_SOURCE_FLAT_VOLUME flag
1149 * set. Instead, flat volume mode is detected by checking whether the root source
1150 * has the flag set. */
1151 bool pa_source_flat_volume_enabled(pa_source *s) {
1152 pa_source_assert_ref(s);
1154 s = pa_source_get_master(s);
1157 return (s->flags & PA_SOURCE_FLAT_VOLUME);
1162 /* Called from the main thread (and also from the IO thread while the main
1163 * thread is waiting). */
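/* Follows the chain of filter sources that share volume with their master
 * until the underlying (root) source is reached. Returns NULL if the chain
 * is broken. */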
1164 pa_source *pa_source_get_master(pa_source *s) {
1165 pa_source_assert_ref(s);
1167 while (s && (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1168 if (PA_UNLIKELY(!s->output_from_master))
1171 s = s->output_from_master->source;
1177 /* Called from main context */
1178 bool pa_source_is_passthrough(pa_source *s) {
1180 pa_source_assert_ref(s);
1182 /* NB Currently only monitor sources support passthrough mode */
1183 return (s->monitor_of && pa_sink_is_passthrough(s->monitor_of));
1186 /* Called from main context */
1187 void pa_source_enter_passthrough(pa_source *s) {
1190 /* set the volume to NORM */
1191 s->saved_volume = *pa_source_get_volume(s, true);
1192 s->saved_save_volume = s->save_volume;
1194 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1195 pa_source_set_volume(s, &volume, true, false);
1198 /* Called from main context */
1199 void pa_source_leave_passthrough(pa_source *s) {
1200 /* Restore source volume to what it was before we entered passthrough mode */
1201 pa_source_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
1203 pa_cvolume_init(&s->saved_volume);
1204 s->saved_save_volume = false;
1207 /* Called from main context. */
1208 static void compute_reference_ratio(pa_source_output *o) {
1210 pa_cvolume remapped;
1213 pa_assert(pa_source_flat_volume_enabled(o->source));
1216 /* Calculates the reference ratio from the source's reference
1217 * volume. This basically calculates:
1219 * o->reference_ratio = o->volume / o->source->reference_volume */
1222 remapped = o->source->reference_volume;
1223 pa_cvolume_remap(&remapped, &o->source->channel_map, &o->channel_map);
1225 o->reference_ratio.channels = o->sample_spec.channels;
1227 for (c = 0; c < o->sample_spec.channels; c++) {
1229 /* We don't update when the source volume is 0 anyway */
1230 if (remapped.values[c] <= PA_VOLUME_MUTED)
1233 /* Don't update the reference ratio unless necessary */
1234 if (pa_sw_volume_multiply(
1235 o->reference_ratio.values[c],
1236 remapped.values[c]) == o->volume.values[c])
1239 o->reference_ratio.values[c] = pa_sw_volume_divide(
1240 o->volume.values[c],
1241 remapped.values[c]);
1245 /* Called from main context. Only called for the root source in volume sharing
1246 * cases, except for internal recursive calls. */
1247 static void compute_reference_ratios(pa_source *s) {
1249 pa_source_output *o;
1251 pa_source_assert_ref(s);
1252 pa_assert_ctl_context();
1253 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1254 pa_assert(pa_source_flat_volume_enabled(s));
1256 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1257 compute_reference_ratio(o);
1259 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1260 compute_reference_ratios(o->destination_source);
1264 /* Called from main context. Only called for the root source in volume sharing
1265 * cases, except for internal recursive calls. */
1266 static void compute_real_ratios(pa_source *s) {
1267 pa_source_output *o;
1270 pa_source_assert_ref(s);
1271 pa_assert_ctl_context();
1272 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1273 pa_assert(pa_source_flat_volume_enabled(s));
1275 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1277 pa_cvolume remapped;
1279 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1280 /* The origin source uses volume sharing, so this output's real ratio
1281 * is handled as a special case - the real ratio must be 0 dB, and
1282 * as a result o->soft_volume must equal o->volume_factor. */
1283 pa_cvolume_reset(&o->real_ratio, o->real_ratio.channels);
1284 o->soft_volume = o->volume_factor;
1286 compute_real_ratios(o->destination_source);
1292 * This basically calculates:
1294 * o->real_ratio := o->volume / s->real_volume
1295 * o->soft_volume := o->real_ratio * o->volume_factor
1298 remapped = s->real_volume;
1299 pa_cvolume_remap(&remapped, &s->channel_map, &o->channel_map);
1301 o->real_ratio.channels = o->sample_spec.channels;
1302 o->soft_volume.channels = o->sample_spec.channels;
1304 for (c = 0; c < o->sample_spec.channels; c++) {
1306 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1307 /* We leave o->real_ratio untouched */
1308 o->soft_volume.values[c] = PA_VOLUME_MUTED;
1312 /* Don't lose accuracy unless necessary */
1313 if (pa_sw_volume_multiply(
1314 o->real_ratio.values[c],
1315 remapped.values[c]) != o->volume.values[c])
1317 o->real_ratio.values[c] = pa_sw_volume_divide(
1318 o->volume.values[c],
1319 remapped.values[c]);
1321 o->soft_volume.values[c] = pa_sw_volume_multiply(
1322 o->real_ratio.values[c],
1323 o->volume_factor.values[c]);
1326 /* We don't copy the soft_volume to the thread_info data
1327 * here. That must be done by the caller */
1331 static pa_cvolume *cvolume_remap_minimal_impact(
1333 const pa_cvolume *template,
1334 const pa_channel_map *from,
1335 const pa_channel_map *to) {
1340 pa_assert(template);
1343 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1344 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1346 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1347 * mapping from source output to source volumes:
1349 * If template is a possible remapping from v it is used instead
1350 * of remapping anew.
1352 * If the channel maps don't match we set an all-channel volume on
1353 * the source to ensure that changing a volume on one stream has no
1354 * effect that cannot be compensated for in another stream that
1355 * does not have the same channel map as the source. */
1357 if (pa_channel_map_equal(from, to))
1361 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1366 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1370 /* Called from main thread. Only called for the root source in volume sharing
1371 * cases, except for internal recursive calls. */
1372 static void get_maximum_output_volume(pa_source *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1373 pa_source_output *o;
1376 pa_source_assert_ref(s);
1377 pa_assert(max_volume);
1378 pa_assert(channel_map);
1379 pa_assert(pa_source_flat_volume_enabled(s));
1381 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1382 pa_cvolume remapped;
1384 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1385 get_maximum_output_volume(o->destination_source, max_volume, channel_map);
1387 /* Ignore this output. The origin source uses volume sharing, so this
1388 * output's volume will be set to be equal to the root source's real
1389 * volume. Obviously this output's current volume must not then
1390 * affect what the root source's real volume will be. */
1394 remapped = o->volume;
1395 cvolume_remap_minimal_impact(&remapped, max_volume, &o->channel_map, channel_map);
1396 pa_cvolume_merge(max_volume, max_volume, &remapped);
1400 /* Called from main thread. Only called for the root source in volume sharing
1401 * cases, except for internal recursive calls. */
1402 static bool has_outputs(pa_source *s) {
1403 pa_source_output *o;
1406 pa_source_assert_ref(s);
1408 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1409 if (!o->destination_source || !(o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || has_outputs(o->destination_source))
1416 /* Called from main thread. Only called for the root source in volume sharing
1417 * cases, except for internal recursive calls. */
1418 static void update_real_volume(pa_source *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1419 pa_source_output *o;
1422 pa_source_assert_ref(s);
1423 pa_assert(new_volume);
1424 pa_assert(channel_map);
1426 s->real_volume = *new_volume;
1427 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1429 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1430 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1431 if (pa_source_flat_volume_enabled(s)) {
1432 pa_cvolume new_output_volume;
1434 /* Follow the root source's real volume. */
1435 new_output_volume = *new_volume;
1436 pa_cvolume_remap(&new_output_volume, channel_map, &o->channel_map);
1437 pa_source_output_set_volume_direct(o, &new_output_volume);
1438 compute_reference_ratio(o);
1441 update_real_volume(o->destination_source, new_volume, channel_map);
1446 /* Called from main thread. Only called for the root source in shared volume
1448 static void compute_real_volume(pa_source *s) {
1449 pa_source_assert_ref(s);
1450 pa_assert_ctl_context();
1451 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1452 pa_assert(pa_source_flat_volume_enabled(s));
1453 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1455 /* This determines the maximum volume of all streams and sets
1456 * s->real_volume accordingly. */
1458 if (!has_outputs(s)) {
1459 /* In the special case that we have no source outputs we leave the
1460 * volume unmodified. */
1461 update_real_volume(s, &s->reference_volume, &s->channel_map);
1465 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1467 /* First let's determine the new maximum volume of all outputs
1468 * connected to this source */
1469 get_maximum_output_volume(s, &s->real_volume, &s->channel_map);
1470 update_real_volume(s, &s->real_volume, &s->channel_map);
1472 /* Then, let's update the real ratios/soft volumes of all outputs
1473 * connected to this source */
1474 compute_real_ratios(s);
1477 /* Called from main thread. Only called for the root source in shared volume
1478 * cases, except for internal recursive calls. */
1479 static void propagate_reference_volume(pa_source *s) {
1480 pa_source_output *o;
1483 pa_source_assert_ref(s);
1484 pa_assert_ctl_context();
1485 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1486 pa_assert(pa_source_flat_volume_enabled(s));
1488 /* This is called whenever the source volume changes that is not
1489 * caused by a source output volume change. We need to fix up the
1490 * source output volumes accordingly */
1492 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1493 pa_cvolume new_volume;
1495 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1496 propagate_reference_volume(o->destination_source);
1498 /* Since the origin source uses volume sharing, this output's volume
1499 * needs to be updated to match the root source's real volume, but
1500 * that will be done later in update_shared_real_volume(). */
1504 /* This basically calculates:
1506 * o->volume := o->reference_volume * o->reference_ratio */
1508 new_volume = s->reference_volume;
1509 pa_cvolume_remap(&new_volume, &s->channel_map, &o->channel_map);
1510 pa_sw_cvolume_multiply(&new_volume, &new_volume, &o->reference_ratio);
1511 pa_source_output_set_volume_direct(o, &new_volume);
1515 /* Called from main thread. Only called for the root source in volume sharing
1516 * cases, except for internal recursive calls. The return value indicates
1517 * whether any reference volume actually changed. */
1518 static bool update_reference_volume(pa_source *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
1520 bool reference_volume_changed;
1521 pa_source_output *o;
1524 pa_source_assert_ref(s);
1525 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1527 pa_assert(channel_map);
1528 pa_assert(pa_cvolume_valid(v));
1531 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
1533 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
1534 pa_source_set_reference_volume_direct(s, &volume);
1536 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
1538 if (!reference_volume_changed && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1539 /* If the root source's volume doesn't change, then there can't be any
1540 * changes in the other source in the source tree either.
1542 * It's probably theoretically possible that even if the root source's
1543 * volume changes slightly, some filter source doesn't change its volume
1544 * due to rounding errors. If that happens, we still want to propagate
1545 * the changed root source volume to the sources connected to the
1546 * intermediate source that didn't change its volume. This theoretical
1547 * possibility is the reason why we have that !(s->flags &
1548 * PA_SOURCE_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
1549 * notice even if we always returned false here when
1550 * reference_volume_changed is false. */
1553 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1554 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1555 update_reference_volume(o->destination_source, v, channel_map, false);
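/* Illustrative sketch (hypothetical values): a typical main-thread caller of
 * pa_source_set_volume() below, applying the same level to all channels. */
#if 0
    pa_cvolume v;
    pa_cvolume_set(&v, s->sample_spec.channels, PA_VOLUME_NORM / 2);
    pa_source_set_volume(s, &v, true /* send_msg */, false /* save */);
#endif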
1561 /* Called from main thread */
1562 void pa_source_set_volume(
1564 const pa_cvolume *volume,
1568 pa_cvolume new_reference_volume;
1569 pa_source *root_source;
1571 pa_source_assert_ref(s);
1572 pa_assert_ctl_context();
1573 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1574 pa_assert(!volume || pa_cvolume_valid(volume));
1575 pa_assert(volume || pa_source_flat_volume_enabled(s));
1576 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1578 /* make sure we don't change the volume in PASSTHROUGH mode ...
1579 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
1580 if (pa_source_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
1581 pa_log_warn("Cannot change volume, source is monitor of a PASSTHROUGH sink");
1585 /* In case of volume sharing, the volume is set for the root source first,
1586 * from which it's then propagated to the sharing sources. */
1587 root_source = pa_source_get_master(s);
1589 if (PA_UNLIKELY(!root_source))
1592 /* As a special exception we accept mono volumes on all sources --
1593 * even on those with more complex channel maps */
1596 if (pa_cvolume_compatible(volume, &s->sample_spec))
1597 new_reference_volume = *volume;
1599 new_reference_volume = s->reference_volume;
1600 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
1603 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
1605 if (update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save)) {
1606 if (pa_source_flat_volume_enabled(root_source)) {
1607 /* OK, propagate this volume change back to the outputs */
1608 propagate_reference_volume(root_source);
1610 /* And now recalculate the real volume */
1611 compute_real_volume(root_source);
1613 update_real_volume(root_source, &root_source->reference_volume, &root_source->channel_map);
1617 /* If volume is NULL we synchronize the source's real and
1618 * reference volumes with the stream volumes. */
1620 pa_assert(pa_source_flat_volume_enabled(root_source));
1622 /* Ok, let's determine the new real volume */
1623 compute_real_volume(root_source);
1625 /* Let's 'push' the reference volume if necessary */
1626 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_source->real_volume);
1627 /* If the source and its root don't have the same channel map, we need to remap */
1628 if (s != root_source && !pa_channel_map_equal(&s->channel_map, &root_source->channel_map))
1629 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
1630 update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save);
1632 /* Now that the reference volume is updated, we can update the streams'
1633 * reference ratios. */
1634 compute_reference_ratios(root_source);
1637 if (root_source->set_volume) {
1638 /* If we have a function set_volume(), then we do not apply a
1639 * soft volume by default. However, set_volume() is free to
1640 * apply one to root_source->soft_volume */
1642 pa_cvolume_reset(&root_source->soft_volume, root_source->sample_spec.channels);
1643 if (!(root_source->flags & PA_SOURCE_DEFERRED_VOLUME))
1644 root_source->set_volume(root_source);
1647 /* If we have no function set_volume(), then the soft volume
1648 * becomes the real volume */
1649 root_source->soft_volume = root_source->real_volume;
1651 /* This tells the source that soft volume and/or real volume changed */
1653 pa_assert_se(pa_asyncmsgq_send(root_source->asyncmsgq, PA_MSGOBJECT(root_source), PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
1656 /* Called from the IO thread if deferred volume is used, otherwise from the main thread.
1657 * Only to be called by the source implementor. */
1658 void pa_source_set_soft_volume(pa_source *s, const pa_cvolume *volume) {
1660 pa_source_assert_ref(s);
1661 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1663 if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
1664 pa_source_assert_io_context(s);
1666 pa_assert_ctl_context();
1669 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1671 s->soft_volume = *volume;
1673 if (PA_SOURCE_IS_LINKED(s->state) && !(s->flags & PA_SOURCE_DEFERRED_VOLUME))
1674 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1676 s->thread_info.soft_volume = s->soft_volume;
1679 /* Called from the main thread. Only called for the root source in volume sharing
1680 * cases, except for internal recursive calls. */
1681 static void propagate_real_volume(pa_source *s, const pa_cvolume *old_real_volume) {
1682 pa_source_output *o;
1685 pa_source_assert_ref(s);
1686 pa_assert(old_real_volume);
1687 pa_assert_ctl_context();
1688 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1690 /* This is called when the hardware's real volume changes due to
1691 * some external event. We copy the real volume into our
1692 * reference volume and then rebuild the stream volumes based on
1693 * o->real_ratio which should stay fixed. */
1695 if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1696 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1699 /* 1. Make the real volume the reference volume */
1700 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
1703 if (pa_source_flat_volume_enabled(s)) {
1704 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1705 pa_cvolume new_volume;
1707 /* 2. Since the source's reference and real volumes are equal
1708 * now our ratios should be too. */
1709 o->reference_ratio = o->real_ratio;
1711 /* 3. Recalculate the new stream reference volume based on the
1712 * reference ratio and the source's reference volume.
1714 * This basically calculates:
1716 * o->volume = s->reference_volume * o->reference_ratio
1718 * This is identical to propagate_reference_volume() */
1719 new_volume = s->reference_volume;
1720 pa_cvolume_remap(&new_volume, &s->channel_map, &o->channel_map);
1721 pa_sw_cvolume_multiply(&new_volume, &new_volume, &o->reference_ratio);
1722 pa_source_output_set_volume_direct(o, &new_volume);
1724 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1725 propagate_real_volume(o->destination_source, old_real_volume);
1729 /* Something got changed in the hardware. It probably makes sense
1730 * to save changed hw settings given that hw volume changes not
1731 * triggered by PA are almost certainly done by the user. */
1732 if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1733 s->save_volume = true;
1736 /* Called from io thread */
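/* Requests that the main thread refreshes the volume and mute state from the
 * hardware, e.g. because deferred-volume hardware changed behind our back. */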
1737 void pa_source_update_volume_and_mute(pa_source *s) {
1739 pa_source_assert_io_context(s);
1741 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
1744 /* Called from main thread */
1745 const pa_cvolume *pa_source_get_volume(pa_source *s, bool force_refresh) {
1746 pa_source_assert_ref(s);
1747 pa_assert_ctl_context();
1748 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1750 if (s->refresh_volume || force_refresh) {
1751 struct pa_cvolume old_real_volume;
1753 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1755 old_real_volume = s->real_volume;
1757 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume)
1760 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1762 update_real_volume(s, &s->real_volume, &s->channel_map);
1763 propagate_real_volume(s, &old_real_volume);
1766 return &s->reference_volume;
1769 /* Called from main thread. In volume sharing cases, only the root source may
1771 void pa_source_volume_changed(pa_source *s, const pa_cvolume *new_real_volume) {
1772 pa_cvolume old_real_volume;
1774 pa_source_assert_ref(s);
1775 pa_assert_ctl_context();
1776 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1777 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1779 /* The source implementor may call this if the volume changed to make sure everyone is notified */
1781 old_real_volume = s->real_volume;
1782 update_real_volume(s, new_real_volume, &s->channel_map);
1783 propagate_real_volume(s, &old_real_volume);
1786 /* Called from main thread */
1787 void pa_source_set_mute(pa_source *s, bool mute, bool save) {
1790 pa_source_assert_ref(s);
1791 pa_assert_ctl_context();
1793 old_muted = s->muted;
1795 if (mute == old_muted) {
1796 s->save_muted |= save;
1801 s->save_muted = save;
1803 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->set_mute) {
1804 s->set_mute_in_progress = true;
1806 s->set_mute_in_progress = false;
1809 if (!PA_SOURCE_IS_LINKED(s->state))
1812 pa_log_debug("The mute of source %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
1813 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1814 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1815 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_MUTE_CHANGED], s);
1818 /* Called from main thread */
1819 bool pa_source_get_mute(pa_source *s, bool force_refresh) {
1821 pa_source_assert_ref(s);
1822 pa_assert_ctl_context();
1823 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1825 if ((s->refresh_muted || force_refresh) && s->get_mute) {
1828 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1829 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
1830 pa_source_mute_changed(s, mute);
1832 if (s->get_mute(s, &mute) >= 0)
1833 pa_source_mute_changed(s, mute);
1840 /* Called from main thread */
1841 void pa_source_mute_changed(pa_source *s, bool new_muted) {
1842 pa_source_assert_ref(s);
1843 pa_assert_ctl_context();
1844 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1846 if (s->set_mute_in_progress)
1849 /* pa_source_set_mute() does this same check, so this may appear redundant,
1850 * but we must have this here also, because the save parameter of
1851 * pa_source_set_mute() would otherwise have unintended side effects
1852 * (saving the mute state when it shouldn't be saved). */
1853 if (new_muted == s->muted)
1856 pa_source_set_mute(s, new_muted, true);
1859 /* Called from main thread */
1860 bool pa_source_update_proplist(pa_source *s, pa_update_mode_t mode, pa_proplist *p) {
1861 pa_source_assert_ref(s);
1862 pa_assert_ctl_context();
1865 pa_proplist_update(s->proplist, mode, p);
1867 if (PA_SOURCE_IS_LINKED(s->state)) {
1868 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1869 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1875 /* Called from main thread */
1876 /* FIXME -- this should be dropped and merged into pa_source_update_proplist() */
1877 void pa_source_set_description(pa_source *s, const char *description) {
1879 pa_source_assert_ref(s);
1880 pa_assert_ctl_context();
1882 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1885 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1887 if (old && description && pa_streq(old, description))
1891 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1893 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1895 if (PA_SOURCE_IS_LINKED(s->state)) {
1896 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1897 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1901 /* Called from main thread */
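/* Returns the number of source outputs connected to this source, including
 * corked ones; contrast with pa_source_used_by() below, which does not count
 * corked outputs. */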
1902 unsigned pa_source_linked_by(pa_source *s) {
1903 pa_source_assert_ref(s);
1904 pa_assert_ctl_context();
1905 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1907 return pa_idxset_size(s->outputs);
1910 /* Called from main thread */
1911 unsigned pa_source_used_by(pa_source *s) {
1914 pa_source_assert_ref(s);
1915 pa_assert_ctl_context();
1916 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1918 ret = pa_idxset_size(s->outputs);
1919 pa_assert(ret >= s->n_corked);
1921 return ret - s->n_corked;
1924 /* Called from main thread */
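/* Returns the number of outputs that should keep the source from being
 * auto-suspended: unlinked outputs, corked outputs and outputs carrying
 * PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND are not counted. */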
1925 unsigned pa_source_check_suspend(pa_source *s) {
1927 pa_source_output *o;
1930 pa_source_assert_ref(s);
1931 pa_assert_ctl_context();
1933 if (!PA_SOURCE_IS_LINKED(s->state))
1938 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1939 pa_source_output_state_t st;
1941 st = pa_source_output_get_state(o);
1943 /* We do not assert here. It is perfectly valid for a source output to
1944 * be in the INIT state (i.e. created, marked done but not yet put)
1945 * and we should not care if it's unlinked as it won't contribute
1946 * towards our busy status. */
1948 if (!PA_SOURCE_OUTPUT_IS_LINKED(st))
1951 if (st == PA_SOURCE_OUTPUT_CORKED)
1954 if (o->flags & PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND)
1963 /* Called from the IO thread */
1964 static void sync_output_volumes_within_thread(pa_source *s) {
1965 pa_source_output *o;
1968 pa_source_assert_ref(s);
1969 pa_source_assert_io_context(s);
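/* Propagate each output's main-thread soft volume into its IO-thread copy;
 * outputs whose soft volume has not changed are skipped. */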
1971 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
1972 if (pa_cvolume_equal(&o->thread_info.soft_volume, &o->soft_volume))
1975 o->thread_info.soft_volume = o->soft_volume;
1976 //pa_source_output_request_rewind(o, 0, true, false, false);
1980 /* Called from the IO thread. Only called for the root source in volume sharing
1981 * cases, except for internal recursive calls. */
1982 static void set_shared_volume_within_thread(pa_source *s) {
1983 pa_source_output *o;
1986 pa_source_assert_ref(s);
1988 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
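/* Recurse into the destination sources of any filter outputs that share
 * their volume with this master, so the whole volume-sharing tree ends up
 * with a consistent soft volume. */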
1990 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
1991 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1992 set_shared_volume_within_thread(o->destination_source);
1996 /* Called from the IO thread, except for messages that are explicitly handled from the main thread (see PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE below) */
1997 int pa_source_process_msg(pa_msgobject *object, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1998 pa_source *s = PA_SOURCE(object);
1999 pa_source_assert_ref(s);
2001 switch ((pa_source_message_t) code) {
2003 case PA_SOURCE_MESSAGE_ADD_OUTPUT: {
2004 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
2006 pa_hashmap_put(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index), pa_source_output_ref(o));
2008 if (o->direct_on_input) {
2009 o->thread_info.direct_on_input = o->direct_on_input;
2010 pa_hashmap_put(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index), o);
2013 pa_assert(!o->thread_info.attached);
2014 o->thread_info.attached = true;
2019 pa_source_output_set_state_within_thread(o, o->state);
2021 if (o->thread_info.requested_source_latency != (pa_usec_t) -1)
2022 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
2024 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2026 /* We don't just invalidate the requested latency here,
2027 * because if we are in a move we might need to fix up the
2028 * requested latency. */
2029 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
2031 /* In flat volume mode we need to update the volume as well */
2033 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2036 case PA_SOURCE_MESSAGE_REMOVE_OUTPUT: {
2037 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
2039 pa_source_output_set_state_within_thread(o, o->state);
2044 pa_assert(o->thread_info.attached);
2045 o->thread_info.attached = false;
2047 if (o->thread_info.direct_on_input) {
2048 pa_hashmap_remove(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index));
2049 o->thread_info.direct_on_input = NULL;
2052 pa_hashmap_remove_and_free(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index));
2053 pa_source_invalidate_requested_latency(s, true);
2055 /* In flat volume mode we need to update the volume as well */
2057 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2060 case PA_SOURCE_MESSAGE_SET_SHARED_VOLUME: {
2061 pa_source *root_source = pa_source_get_master(s);
2063 if (PA_LIKELY(root_source))
2064 set_shared_volume_within_thread(root_source);
2069 case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:
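/* With deferred volume the new hardware volume is queued as a timed volume
 * change; we then fall through to update our own soft volume and finally
 * the per-output soft volumes. */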
2071 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
2073 pa_source_volume_change_push(s);
2075 /* Fall through ... */
2077 case PA_SOURCE_MESSAGE_SET_VOLUME:
2079 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2080 s->thread_info.soft_volume = s->soft_volume;
2083 /* Fall through ... */
2085 case PA_SOURCE_MESSAGE_SYNC_VOLUMES:
2086 sync_output_volumes_within_thread(s);
2089 case PA_SOURCE_MESSAGE_GET_VOLUME:
2091 if ((s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume) {
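/* With deferred volume, drop any still-pending queued changes and recompute
 * current_hw_volume as real_volume / soft_volume, i.e. the value that is
 * supposed to be set in hardware right now. */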
2093 pa_source_volume_change_flush(s);
2094 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2097 /* In case the source implementor reset the SW volume. */
2098 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2099 s->thread_info.soft_volume = s->soft_volume;
2104 case PA_SOURCE_MESSAGE_SET_MUTE:
2106 if (s->thread_info.soft_muted != s->muted) {
2107 s->thread_info.soft_muted = s->muted;
2110 if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->set_mute)
2115 case PA_SOURCE_MESSAGE_GET_MUTE:
2117 if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->get_mute)
2118 return s->get_mute(s, userdata);
2122 case PA_SOURCE_MESSAGE_SET_STATE: {
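/* A state change only counts as a suspend change when we move into or out
 * of PA_SOURCE_SUSPENDED; only then are the outputs notified through their
 * suspend_within_thread() callbacks. */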
2124 bool suspend_change =
2125 (s->thread_info.state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2126 (PA_SOURCE_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SOURCE_SUSPENDED);
2128 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2130 if (suspend_change) {
2131 pa_source_output *o;
2134 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2135 if (o->suspend_within_thread)
2136 o->suspend_within_thread(o, s->thread_info.state == PA_SOURCE_SUSPENDED);
2142 case PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY: {
2144 pa_usec_t *usec = userdata;
2145 *usec = pa_source_get_requested_latency_within_thread(s);
2147 /* Yes, that's right, the IO thread will see -1 when no
2148 * explicit requested latency is configured, the main
2149 * thread will see max_latency */
2150 if (*usec == (pa_usec_t) -1)
2151 *usec = s->thread_info.max_latency;
2156 case PA_SOURCE_MESSAGE_SET_LATENCY_RANGE: {
2157 pa_usec_t *r = userdata;
2159 pa_source_set_latency_range_within_thread(s, r[0], r[1]);
2164 case PA_SOURCE_MESSAGE_GET_LATENCY_RANGE: {
2165 pa_usec_t *r = userdata;
2167 r[0] = s->thread_info.min_latency;
2168 r[1] = s->thread_info.max_latency;
2173 case PA_SOURCE_MESSAGE_GET_FIXED_LATENCY:
2175 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2178 case PA_SOURCE_MESSAGE_SET_FIXED_LATENCY:
2180 pa_source_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2183 case PA_SOURCE_MESSAGE_GET_MAX_REWIND:
2185 *((size_t*) userdata) = s->thread_info.max_rewind;
2188 case PA_SOURCE_MESSAGE_SET_MAX_REWIND:
2190 pa_source_set_max_rewind_within_thread(s, (size_t) offset);
2193 case PA_SOURCE_MESSAGE_GET_LATENCY:
2195 if (s->monitor_of) {
2196 *((pa_usec_t*) userdata) = 0;
2200 /* Implementors need to override this default implementation! */
2203 case PA_SOURCE_MESSAGE_SET_PORT:
2205 pa_assert(userdata);
2207 struct source_message_set_port *msg_data = userdata;
2208 msg_data->ret = s->set_port(s, msg_data->port);
2212 case PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2213 /* This message is sent from the IO thread and handled in the main thread. */
2214 pa_assert_ctl_context();
2216 /* Make sure we're not messing with the main thread when we are no longer linked */
2217 if (!PA_SOURCE_IS_LINKED(s->state))
2220 pa_source_get_volume(s, true);
2221 pa_source_get_mute(s, true);
2224 case PA_SOURCE_MESSAGE_SET_LATENCY_OFFSET:
2225 s->thread_info.latency_offset = offset;
2228 case PA_SOURCE_MESSAGE_MAX:
2235 /* Called from main thread */
2236 int pa_source_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
2241 pa_core_assert_ref(c);
2242 pa_assert_ctl_context();
2243 pa_assert(cause != 0);
2245 for (source = PA_SOURCE(pa_idxset_first(c->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(c->sources, &idx))) {
2248 if (source->monitor_of)
2251 if ((r = pa_source_suspend(source, suspend, cause)) < 0)
2258 /* Called from IO thread */
2259 void pa_source_detach_within_thread(pa_source *s) {
2260 pa_source_output *o;
2263 pa_source_assert_ref(s);
2264 pa_source_assert_io_context(s);
2265 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2267 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2272 /* Called from IO thread */
2273 void pa_source_attach_within_thread(pa_source *s) {
2274 pa_source_output *o;
2277 pa_source_assert_ref(s);
2278 pa_source_assert_io_context(s);
2279 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2281 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2286 /* Called from IO thread */
2287 pa_usec_t pa_source_get_requested_latency_within_thread(pa_source *s) {
2288 pa_usec_t result = (pa_usec_t) -1;
2289 pa_source_output *o;
2292 pa_source_assert_ref(s);
2293 pa_source_assert_io_context(s);
2295 if (!(s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2296 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2298 if (s->thread_info.requested_latency_valid)
2299 return s->thread_info.requested_latency;
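/* No valid cached value: the requested latency is the smallest latency any
 * output has asked for, clamped to the source's configured latency range. */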
2301 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2302 if (o->thread_info.requested_source_latency != (pa_usec_t) -1 &&
2303 (result == (pa_usec_t) -1 || result > o->thread_info.requested_source_latency))
2304 result = o->thread_info.requested_source_latency;
2306 if (result != (pa_usec_t) -1)
2307 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2309 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2310 /* Only cache this if we are fully set up */
2311 s->thread_info.requested_latency = result;
2312 s->thread_info.requested_latency_valid = true;
2318 /* Called from main thread */
2319 pa_usec_t pa_source_get_requested_latency(pa_source *s) {
2322 pa_source_assert_ref(s);
2323 pa_assert_ctl_context();
2324 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2326 if (s->state == PA_SOURCE_SUSPENDED)
2329 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2334 /* Called from IO thread */
2335 void pa_source_set_max_rewind_within_thread(pa_source *s, size_t max_rewind) {
2336 pa_source_output *o;
2339 pa_source_assert_ref(s);
2340 pa_source_assert_io_context(s);
2342 if (max_rewind == s->thread_info.max_rewind)
2345 s->thread_info.max_rewind = max_rewind;
2347 if (PA_SOURCE_IS_LINKED(s->thread_info.state))
2348 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2349 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2352 /* Called from main thread */
2353 void pa_source_set_max_rewind(pa_source *s, size_t max_rewind) {
2354 pa_source_assert_ref(s);
2355 pa_assert_ctl_context();
2357 if (PA_SOURCE_IS_LINKED(s->state))
2358 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2360 pa_source_set_max_rewind_within_thread(s, max_rewind);
2363 /* Called from IO thread */
2364 void pa_source_invalidate_requested_latency(pa_source *s, bool dynamic) {
2365 pa_source_output *o;
2368 pa_source_assert_ref(s);
2369 pa_source_assert_io_context(s);
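/* Drop the cached requested latency (only sources with dynamic latency keep
 * such a cache) and notify the implementor and all outputs that it may have
 * changed; a monitor source forwards the invalidation to the sink it
 * monitors. */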
2371 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2372 s->thread_info.requested_latency_valid = false;
2376 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2378 if (s->update_requested_latency)
2379 s->update_requested_latency(s);
2381 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2382 if (o->update_source_requested_latency)
2383 o->update_source_requested_latency(o);
2387 pa_sink_invalidate_requested_latency(s->monitor_of, dynamic);
2390 /* Called from main thread */
2391 void pa_source_set_latency_range(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2392 pa_source_assert_ref(s);
2393 pa_assert_ctl_context();
2395 /* min_latency == 0: no limit
2396 * min_latency anything else: specified limit
2398 * Similar for max_latency */
2400 if (min_latency < ABSOLUTE_MIN_LATENCY)
2401 min_latency = ABSOLUTE_MIN_LATENCY;
2403 if (max_latency <= 0 ||
2404 max_latency > ABSOLUTE_MAX_LATENCY)
2405 max_latency = ABSOLUTE_MAX_LATENCY;
2407 pa_assert(min_latency <= max_latency);
2409 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2410 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2411 max_latency == ABSOLUTE_MAX_LATENCY) ||
2412 (s->flags & PA_SOURCE_DYNAMIC_LATENCY));
2414 if (PA_SOURCE_IS_LINKED(s->state)) {
2420 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2422 pa_source_set_latency_range_within_thread(s, min_latency, max_latency);
2425 /* Called from main thread */
2426 void pa_source_get_latency_range(pa_source *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2427 pa_source_assert_ref(s);
2428 pa_assert_ctl_context();
2429 pa_assert(min_latency);
2430 pa_assert(max_latency);
2432 if (PA_SOURCE_IS_LINKED(s->state)) {
2433 pa_usec_t r[2] = { 0, 0 };
2435 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2437 *min_latency = r[0];
2438 *max_latency = r[1];
2440 *min_latency = s->thread_info.min_latency;
2441 *max_latency = s->thread_info.max_latency;
2445 /* Called from IO thread, and from main thread before pa_source_put() is called */
2446 void pa_source_set_latency_range_within_thread(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2447 pa_source_assert_ref(s);
2448 pa_source_assert_io_context(s);
2450 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2451 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2452 pa_assert(min_latency <= max_latency);
2454 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2455 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2456 max_latency == ABSOLUTE_MAX_LATENCY) ||
2457 (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ||
2460 if (s->thread_info.min_latency == min_latency &&
2461 s->thread_info.max_latency == max_latency)
2464 s->thread_info.min_latency = min_latency;
2465 s->thread_info.max_latency = max_latency;
2467 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2468 pa_source_output *o;
2471 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2472 if (o->update_source_latency_range)
2473 o->update_source_latency_range(o);
2476 pa_source_invalidate_requested_latency(s, false);
2479 /* Called from main thread, before the source is put */
2480 void pa_source_set_fixed_latency(pa_source *s, pa_usec_t latency) {
2481 pa_source_assert_ref(s);
2482 pa_assert_ctl_context();
2484 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2485 pa_assert(latency == 0);
2489 if (latency < ABSOLUTE_MIN_LATENCY)
2490 latency = ABSOLUTE_MIN_LATENCY;
2492 if (latency > ABSOLUTE_MAX_LATENCY)
2493 latency = ABSOLUTE_MAX_LATENCY;
2495 if (PA_SOURCE_IS_LINKED(s->state))
2496 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2498 s->thread_info.fixed_latency = latency;
2501 /* Called from main thread */
2502 pa_usec_t pa_source_get_fixed_latency(pa_source *s) {
2505 pa_source_assert_ref(s);
2506 pa_assert_ctl_context();
2508 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY)
2511 if (PA_SOURCE_IS_LINKED(s->state))
2512 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2514 latency = s->thread_info.fixed_latency;
2519 /* Called from IO thread */
2520 void pa_source_set_fixed_latency_within_thread(pa_source *s, pa_usec_t latency) {
2521 pa_source_assert_ref(s);
2522 pa_source_assert_io_context(s);
2524 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2525 pa_assert(latency == 0);
2526 s->thread_info.fixed_latency = 0;
2531 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2532 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2534 if (s->thread_info.fixed_latency == latency)
2537 s->thread_info.fixed_latency = latency;
2539 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2540 pa_source_output *o;
2543 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2544 if (o->update_source_fixed_latency)
2545 o->update_source_fixed_latency(o);
2548 pa_source_invalidate_requested_latency(s, false);
2551 /* Called from main thread */
2552 void pa_source_set_latency_offset(pa_source *s, int64_t offset) {
2553 pa_source_assert_ref(s);
2555 s->latency_offset = offset;
2557 if (PA_SOURCE_IS_LINKED(s->state))
2558 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_OFFSET, NULL, offset, NULL) == 0);
2560 s->thread_info.latency_offset = offset;
2563 /* Called from main thread */
2564 size_t pa_source_get_max_rewind(pa_source *s) {
2566 pa_assert_ctl_context();
2567 pa_source_assert_ref(s);
2569 if (!PA_SOURCE_IS_LINKED(s->state))
2570 return s->thread_info.max_rewind;
2572 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2577 /* Called from main context */
2578 int pa_source_set_port(pa_source *s, const char *name, bool save) {
2579 pa_device_port *port;
2582 pa_source_assert_ref(s);
2583 pa_assert_ctl_context();
2586 pa_log_debug("set_port() operation not implemented for source %u \"%s\"", s->index, s->name);
2587 return -PA_ERR_NOTIMPLEMENTED;
2591 return -PA_ERR_NOENTITY;
2593 if (!(port = pa_hashmap_get(s->ports, name)))
2594 return -PA_ERR_NOENTITY;
2596 if (s->active_port == port) {
2597 s->save_port = s->save_port || save;
2601 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
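/* With deferred volume the set_port() callback has to run in the IO thread,
 * so forward the request there as a message and wait for the result. */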
2602 struct source_message_set_port msg = { .port = port, .ret = 0 };
2603 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
2607 ret = s->set_port(s, port);
2610 return -PA_ERR_NOENTITY;
2612 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2614 pa_log_info("Changed port of source %u \"%s\" to %s", s->index, s->name, port->name);
2616 s->active_port = port;
2617 s->save_port = save;
2619 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_CHANGED], s);
2624 PA_STATIC_FLIST_DECLARE(pa_source_volume_change, 0, pa_xfree);
2626 /* Called from the IO thread. */
2627 static pa_source_volume_change *pa_source_volume_change_new(pa_source *s) {
2628 pa_source_volume_change *c;
2629 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_source_volume_change))))
2630 c = pa_xnew(pa_source_volume_change, 1);
2632 PA_LLIST_INIT(pa_source_volume_change, c);
2634 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
2638 /* Called from the IO thread. */
2639 static void pa_source_volume_change_free(pa_source_volume_change *c) {
2641 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_source_volume_change), c) < 0)
2645 /* Called from the IO thread. */
2646 void pa_source_volume_change_push(pa_source *s) {
2647 pa_source_volume_change *c = NULL;
2648 pa_source_volume_change *nc = NULL;
2649 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
2651 const char *direction = NULL;
2654 nc = pa_source_volume_change_new(s);
2656 /* NOTE: There are already more volume fields in pa_source than I can
2657 * keep track of. Adding one more for the HW volume would let us get rid of
2658 * this division, but I am trying to survive with the ones we already have. */
2659 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
2661 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
2662 pa_log_debug("Volume not changing");
2663 pa_source_volume_change_free(nc);
2667 nc->at = pa_source_get_latency_within_thread(s);
2668 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
2670 if (s->thread_info.volume_changes_tail) {
2671 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
2672 /* If volume is going up let's do it a bit late. If it is going
2673 * down let's do it a bit early. */
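/* For example, with a safety margin of (say) 8 ms: an upward change whose
 * target time plus the margin would pass an already queued change is pushed
 * 8 ms later, while a downward change whose target time minus the margin
 * still lies after the queued change is pulled 8 ms earlier. */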
2674 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
2675 if (nc->at + safety_margin > c->at) {
2676 nc->at += safety_margin;
2681 else if (nc->at - safety_margin > c->at) {
2682 nc->at -= safety_margin;
2690 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
2691 nc->at += safety_margin;
2694 nc->at -= safety_margin;
2697 PA_LLIST_PREPEND(pa_source_volume_change, s->thread_info.volume_changes, nc);
2700 PA_LLIST_INSERT_AFTER(pa_source_volume_change, s->thread_info.volume_changes, c, nc);
2703 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
2705 /* We can drop volume changes that were pushed earlier but are scheduled to happen later than this one. */
2706 PA_LLIST_FOREACH(c, nc->next) {
2707 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
2708 pa_source_volume_change_free(c);
2711 s->thread_info.volume_changes_tail = nc;
2714 /* Called from the IO thread. */
2715 static void pa_source_volume_change_flush(pa_source *s) {
2716 pa_source_volume_change *c = s->thread_info.volume_changes;
2718 s->thread_info.volume_changes = NULL;
2719 s->thread_info.volume_changes_tail = NULL;
2721 pa_source_volume_change *next = c->next;
2722 pa_source_volume_change_free(c);
2727 /* Called from the IO thread. */
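/* Applies every queued hardware volume change whose due time has passed
 * (the implementor's write_volume() callback is required for this) and
 * reports through usec_to_next how long to wait for the next pending
 * change, or 0 if the queue is now empty. */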
2728 bool pa_source_volume_change_apply(pa_source *s, pa_usec_t *usec_to_next) {
2734 if (!s->thread_info.volume_changes || !PA_SOURCE_IS_LINKED(s->state)) {
2740 pa_assert(s->write_volume);
2742 now = pa_rtclock_now();
2744 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
2745 pa_source_volume_change *c = s->thread_info.volume_changes;
2746 PA_LLIST_REMOVE(pa_source_volume_change, s->thread_info.volume_changes, c);
2747 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
2748 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
2750 s->thread_info.current_hw_volume = c->hw_volume;
2751 pa_source_volume_change_free(c);
2757 if (s->thread_info.volume_changes) {
2759 *usec_to_next = s->thread_info.volume_changes->at - now;
2760 if (pa_log_ratelimit(PA_LOG_DEBUG))
2761 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
2766 s->thread_info.volume_changes_tail = NULL;
2771 /* Called from the main thread */
2772 /* Gets the list of formats supported by the source. The members and idxset must
2773 * be freed by the caller. */
2774 pa_idxset* pa_source_get_formats(pa_source *s) {
2779 if (s->get_formats) {
2780 /* Source supports format query, all is good */
2781 ret = s->get_formats(s);
2783 /* Source doesn't support format query, so assume it does PCM */
2784 pa_format_info *f = pa_format_info_new();
2785 f->encoding = PA_ENCODING_PCM;
2787 ret = pa_idxset_new(NULL, NULL);
2788 pa_idxset_put(ret, f, NULL);
2794 /* Called from the main thread */
2795 /* Checks if the source can accept this format */
2796 bool pa_source_check_format(pa_source *s, pa_format_info *f) {
2797 pa_idxset *formats = NULL;
2803 formats = pa_source_get_formats(s);
2806 pa_format_info *finfo_device;
2809 PA_IDXSET_FOREACH(finfo_device, formats, i) {
2810 if (pa_format_info_is_compatible(finfo_device, f)) {
2816 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
2822 /* Called from the main thread */
2823 /* Calculates the intersection between formats supported by the source and
2824 * in_formats, and returns these, in the order of the source's formats. */
2825 pa_idxset* pa_source_check_formats(pa_source *s, pa_idxset *in_formats) {
2826 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *source_formats = NULL;
2827 pa_format_info *f_source, *f_in;
2832 if (!in_formats || pa_idxset_isempty(in_formats))
2835 source_formats = pa_source_get_formats(s);
2837 PA_IDXSET_FOREACH(f_source, source_formats, i) {
2838 PA_IDXSET_FOREACH(f_in, in_formats, j) {
2839 if (pa_format_info_is_compatible(f_source, f_in))
2840 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
2846 pa_idxset_free(source_formats, (pa_free_cb_t) pa_format_info_free);
2851 /* Called from the main thread. */
2852 void pa_source_set_reference_volume_direct(pa_source *s, const pa_cvolume *volume) {
2853 pa_cvolume old_volume;
2854 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
2855 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
2860 old_volume = s->reference_volume;
2862 if (pa_cvolume_equal(volume, &old_volume))
2865 s->reference_volume = *volume;
2866 pa_log_debug("The reference volume of source %s changed from %s to %s.", s->name,
2867 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
2868 s->flags & PA_SOURCE_DECIBEL_VOLUME),
2869 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
2870 s->flags & PA_SOURCE_DECIBEL_VOLUME));
2872 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2873 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_VOLUME_CHANGED], s);