/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
***/
#include <pulse/format.h>
#include <pulse/utf8.h>
#include <pulse/xmalloc.h>
#include <pulse/timeval.h>
#include <pulse/util.h>
#include <pulse/rtclock.h>
#include <pulse/internal.h>

#include <pulsecore/core-util.h>
#include <pulsecore/source-output.h>
#include <pulsecore/namereg.h>
#include <pulsecore/core-subscribe.h>
#include <pulsecore/log.h>
#include <pulsecore/mix.h>
#include <pulsecore/flist.h>

#define ABSOLUTE_MIN_LATENCY (500)
#define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
#define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)

PA_DEFINE_PUBLIC_CLASS(pa_source, pa_msgobject);

struct pa_source_volume_change {
    PA_LLIST_FIELDS(pa_source_volume_change);

struct set_state_data {
    pa_source_state_t state;
    pa_suspend_cause_t suspend_cause;

static void source_free(pa_object *o);

static void pa_source_volume_change_push(pa_source *s);
static void pa_source_volume_change_flush(pa_source *s);

pa_source_new_data* pa_source_new_data_init(pa_source_new_data *data) {
    data->proplist = pa_proplist_new();
    data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);

void pa_source_new_data_set_name(pa_source_new_data *data, const char *name) {
    data->name = pa_xstrdup(name);

void pa_source_new_data_set_sample_spec(pa_source_new_data *data, const pa_sample_spec *spec) {
    if ((data->sample_spec_is_set = !!spec))
        data->sample_spec = *spec;

void pa_source_new_data_set_channel_map(pa_source_new_data *data, const pa_channel_map *map) {
    if ((data->channel_map_is_set = !!map))
        data->channel_map = *map;

void pa_source_new_data_set_alternate_sample_rate(pa_source_new_data *data, const uint32_t alternate_sample_rate) {
    data->alternate_sample_rate_is_set = true;
    data->alternate_sample_rate = alternate_sample_rate;

void pa_source_new_data_set_volume(pa_source_new_data *data, const pa_cvolume *volume) {
    if ((data->volume_is_set = !!volume))
        data->volume = *volume;

void pa_source_new_data_set_muted(pa_source_new_data *data, bool mute) {
    data->muted_is_set = true;

void pa_source_new_data_set_port(pa_source_new_data *data, const char *port) {
    pa_xfree(data->active_port);
    data->active_port = pa_xstrdup(port);

void pa_source_new_data_done(pa_source_new_data *data) {
    pa_proplist_free(data->proplist);

        pa_hashmap_free(data->ports);

    pa_xfree(data->name);
    pa_xfree(data->active_port);
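
/*
 * Illustrative sketch (not taken from any particular backend) of how the
 * pa_source_new_data helpers above are typically used by a driver module
 * before the structure is handed to pa_source_new(). "m" is assumed to be
 * the pa_module, "ss" and "map" a valid sample spec and channel map prepared
 * by the caller; the source name and fail() error path are made up:
 *
 *     pa_source_new_data data;
 *     pa_source *s;
 *
 *     pa_source_new_data_init(&data);
 *     data.driver = __FILE__;
 *     data.module = m;
 *     pa_source_new_data_set_name(&data, "example_input");
 *     pa_source_new_data_set_sample_spec(&data, &ss);
 *     pa_source_new_data_set_channel_map(&data, &map);
 *
 *     s = pa_source_new(m->core, &data, PA_SOURCE_LATENCY | PA_SOURCE_HW_VOLUME_CTRL);
 *     pa_source_new_data_done(&data);
 *
 *     if (!s)
 *         fail();
 */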

/* Called from main context */
static void reset_callbacks(pa_source *s) {
    s->set_state_in_main_thread = NULL;
    s->set_state_in_io_thread = NULL;
    s->get_volume = NULL;
    s->set_volume = NULL;
    s->write_volume = NULL;
    s->update_requested_latency = NULL;
    s->get_formats = NULL;
    s->reconfigure = NULL;

/* Called from main context */
pa_source* pa_source_new(
        pa_source_new_data *data,
        pa_source_flags_t flags) {

    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];

    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_source);

    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SOURCE, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);

    pa_source_new_data_set_name(data, name);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_NEW], data) < 0) {
        pa_namereg_unregister(core, name);

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    /* FIXME: There should probably be a general function for checking whether
     * the source volume is allowed to be set, like there is for source outputs. */
    pa_assert(!data->volume_is_set || !(flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    if (!data->volume_is_set) {
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);
        data->save_volume = false;

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)

        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist, data->card);
    pa_device_init_icon(data->proplist, false);
    pa_device_init_intended_roles(data->proplist);

    if (!data->active_port) {
        pa_device_port *p = pa_device_port_find_best(data->ports);
            pa_source_new_data_set_port(data, p->name);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_FIXATE], data) < 0) {
        pa_namereg_unregister(core, name);

    s->parent.parent.free = source_free;
    s->parent.process_msg = pa_source_process_msg;

    s->state = PA_SOURCE_INIT;
    s->suspend_cause = data->suspend_cause;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;
    s->default_sample_rate = s->sample_spec.rate;

    if (data->alternate_sample_rate_is_set)
        s->alternate_sample_rate = data->alternate_sample_rate;
        s->alternate_sample_rate = s->core->alternate_sample_rate;

    s->avoid_resampling = data->avoid_resampling;

    s->outputs = pa_idxset_new(NULL, NULL);
    s->monitor_of = NULL;
    s->output_from_master = NULL;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = false;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;

    s->active_port = NULL;
    s->save_port = false;

    if (data->active_port)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* Hopefully the active port has already been assigned in the previous call
       to pa_device_port_find_best, but better safe than sorry */
        s->active_port = pa_device_port_find_best(s->ports);

        s->port_latency_offset = s->active_port->latency_offset;
        s->port_latency_offset = 0;

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,

    s->thread_info.rtpoll = NULL;
    s->thread_info.outputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
                                                 (pa_free_cb_t) pa_source_output_unref);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.max_rewind = 0;
    s->thread_info.requested_latency_valid = false;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SOURCE_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    PA_LLIST_HEAD_INIT(pa_source_volume_change, s->thread_info.volume_changes);
    s->thread_info.volume_changes_tail = NULL;
    pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
    s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
    s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
    s->thread_info.port_latency_offset = s->port_latency_offset;

    /* FIXME: This should probably be moved to pa_source_put() */
    pa_assert_se(pa_idxset_put(core->sources, s, &s->index) >= 0);

        pa_assert_se(pa_idxset_put(s->card->sources, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created source %u \"%s\" with sample spec %s and channel map %s\n %s",
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),

/* Called from main context */
static int source_set_state(pa_source *s, pa_source_state_t state, pa_suspend_cause_t suspend_cause) {
    bool suspend_cause_changed;

    pa_assert_ctl_context();

    state_changed = state != s->state;
    suspend_cause_changed = suspend_cause != s->suspend_cause;

    if (!state_changed && !suspend_cause_changed)

    suspending = PA_SOURCE_IS_OPENED(s->state) && state == PA_SOURCE_SUSPENDED;
    resuming = s->state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(state);

    /* If we are resuming, suspend_cause must be 0. */
    pa_assert(!resuming || !suspend_cause);

    /* Here's something to think about: what to do with the suspend cause if
     * resuming the source fails? The old suspend cause will be incorrect, so we
     * can't use that. On the other hand, if we set no suspend cause (as is the
     * case currently), then it looks strange to have a source suspended without
     * any cause. It might be a good idea to add a new "resume failed" suspend
     * cause, or it might just add unnecessary complexity, given that the
     * current approach of not setting any suspend cause works well enough. */

    if (s->set_state_in_main_thread) {
        if ((ret = s->set_state_in_main_thread(s, state, suspend_cause)) < 0) {
            /* set_state_in_main_thread() is allowed to fail only when resuming. */

            /* If resuming fails, we set the state to SUSPENDED and
             * suspend_cause to 0. */
            state = PA_SOURCE_SUSPENDED;

            state_changed = false;
            suspend_cause_changed = suspend_cause != s->suspend_cause;

            /* We know the state isn't changing. If the suspend cause isn't
             * changing either, then there's nothing more to do. */
            if (!suspend_cause_changed)

        struct set_state_data data = { .state = state, .suspend_cause = suspend_cause };

        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_STATE, &data, 0, NULL)) < 0) {
            /* SET_STATE is allowed to fail only when resuming. */

            if (s->set_state_in_main_thread)
                s->set_state_in_main_thread(s, PA_SOURCE_SUSPENDED, 0);

            /* If resuming fails, we set the state to SUSPENDED and
             * suspend_cause to 0. */
            state = PA_SOURCE_SUSPENDED;

            state_changed = false;
            suspend_cause_changed = suspend_cause != s->suspend_cause;

            /* We know the state isn't changing. If the suspend cause isn't
             * changing either, then there's nothing more to do. */
            if (!suspend_cause_changed)

    if (suspend_cause_changed) {
        char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
        char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];

        pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(s->suspend_cause, old_cause_buf),
                     pa_suspend_cause_to_string(suspend_cause, new_cause_buf));
        s->suspend_cause = suspend_cause;

        pa_log_debug("%s: state: %s -> %s", s->name, pa_source_state_to_string(s->state), pa_source_state_to_string(state));

        /* If we enter UNLINKED state, then we don't send change notifications.
         * pa_source_unlink() will send unlink notifications instead. */
        if (state != PA_SOURCE_UNLINKED) {
            pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_STATE_CHANGED], s);
            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

    if (suspending || resuming) {

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(o, s->outputs, idx)
            if (s->state == PA_SOURCE_SUSPENDED &&
                (o->flags & PA_SOURCE_OUTPUT_KILL_ON_SUSPEND))
                pa_source_output_kill(o);
                o->suspend(o, state == PA_SOURCE_SUSPENDED);
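
/*
 * Illustrative sketch of a backend's set_state_in_main_thread() hook, to make
 * the failure rule above concrete: returning a negative value is only
 * acceptable when resuming, in which case source_set_state() falls back to
 * PA_SOURCE_SUSPENDED with a cleared suspend cause. "struct userdata",
 * open_device() and close_device() are invented names:
 *
 *     static int example_set_state_in_main_thread(pa_source *s, pa_source_state_t state,
 *                                                 pa_suspend_cause_t suspend_cause) {
 *         struct userdata *u = s->userdata;
 *
 *         if (state == PA_SOURCE_SUSPENDED) {
 *             close_device(u);
 *             return 0;
 *         }
 *
 *         if (s->state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(state) &&
 *             open_device(u) < 0)
 *             return -1;
 *
 *         return 0;
 *     }
 *
 * A negative return from the resume path is what triggers the fallback to
 * PA_SOURCE_SUSPENDED above.
 */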

void pa_source_set_get_volume_callback(pa_source *s, pa_source_cb_t cb) {

void pa_source_set_set_volume_callback(pa_source *s, pa_source_cb_t cb) {
    pa_source_flags_t flags;

    pa_assert(!s->write_volume || cb);

    /* Save the current flags so we can tell if they've changed */

        /* The source implementor is responsible for setting decibel volume support */
        s->flags |= PA_SOURCE_HW_VOLUME_CTRL;
        s->flags &= ~PA_SOURCE_HW_VOLUME_CTRL;
        /* See note below in pa_source_put() about volume sharing and decibel volumes */
        pa_source_enable_decibel_volume(s, !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    /* If the flags have changed after init, let any clients know via a change event */
    if (s->state != PA_SOURCE_INIT && flags != s->flags)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

void pa_source_set_write_volume_callback(pa_source *s, pa_source_cb_t cb) {
    pa_source_flags_t flags;

    pa_assert(!cb || s->set_volume);

    s->write_volume = cb;

    /* Save the current flags so we can tell if they've changed */

        s->flags |= PA_SOURCE_DEFERRED_VOLUME;
        s->flags &= ~PA_SOURCE_DEFERRED_VOLUME;

    /* If the flags have changed after init, let any clients know via a change event */
    if (s->state != PA_SOURCE_INIT && flags != s->flags)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

void pa_source_set_get_mute_callback(pa_source *s, pa_source_get_mute_cb_t cb) {

void pa_source_set_set_mute_callback(pa_source *s, pa_source_cb_t cb) {
    pa_source_flags_t flags;

    /* Save the current flags so we can tell if they've changed */

        s->flags |= PA_SOURCE_HW_MUTE_CTRL;
        s->flags &= ~PA_SOURCE_HW_MUTE_CTRL;

    /* If the flags have changed after init, let any clients know via a change event */
    if (s->state != PA_SOURCE_INIT && flags != s->flags)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

static void enable_flat_volume(pa_source *s, bool enable) {
    pa_source_flags_t flags;

    /* Always follow the overall user preference here */
    enable = enable && s->core->flat_volumes;

    /* Save the current flags so we can tell if they've changed */

        s->flags |= PA_SOURCE_FLAT_VOLUME;
        s->flags &= ~PA_SOURCE_FLAT_VOLUME;

    /* If the flags have changed after init, let any clients know via a change event */
    if (s->state != PA_SOURCE_INIT && flags != s->flags)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

void pa_source_enable_decibel_volume(pa_source *s, bool enable) {
    pa_source_flags_t flags;

    /* Save the current flags so we can tell if they've changed */

        s->flags |= PA_SOURCE_DECIBEL_VOLUME;
        enable_flat_volume(s, true);
        s->flags &= ~PA_SOURCE_DECIBEL_VOLUME;
        enable_flat_volume(s, false);

    /* If the flags have changed after init, let any clients know via a change event */
    if (s->state != PA_SOURCE_INIT && flags != s->flags)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
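
/*
 * The setter functions above are meant to be called by the source implementor
 * between pa_source_new() and pa_source_put(). A hedged sketch, with the
 * example_* names standing in for hypothetical driver callbacks:
 *
 *     pa_source_set_get_volume_callback(s, example_get_volume);
 *     pa_source_set_set_volume_callback(s, example_set_volume);
 *     pa_source_set_get_mute_callback(s, example_get_mute);
 *     pa_source_set_set_mute_callback(s, example_set_mute);
 *
 * Installing the "set" callbacks turns on PA_SOURCE_HW_VOLUME_CTRL and
 * PA_SOURCE_HW_MUTE_CTRL as implemented above; a write_volume callback
 * additionally enables PA_SOURCE_DEFERRED_VOLUME.
 */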

/* Called from main context */
void pa_source_put(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SOURCE_INIT);
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || pa_source_is_filter(s));

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_source_new(). As a
     * special exception we allow some volume related flags to be set
     * between _new() and _put() by the callback setter functions above.
     *
     * Thus we implement a couple safeguards here which ensure the above
     * setters were used (or at least the implementor made manual changes
     * in a compatible way).
     *
     * Note: All of these flags set here can change over the life time
     * of the source. */
    pa_assert(!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) || s->set_volume);
    pa_assert(!(s->flags & PA_SOURCE_DEFERRED_VOLUME) || s->write_volume);
    pa_assert(!(s->flags & PA_SOURCE_HW_MUTE_CTRL) || s->set_mute);

    /* XXX: Currently decibel volume is disabled for all sources that use volume
     * sharing. When the master source supports decibel volume, it would be good
     * to have the flag also in the filter source, but currently we don't do that
     * so that the flags of the filter source never change when it's moved from
     * a master source to another. One solution for this problem would be to
     * remove user-visible volume altogether from filter sources when volume
     * sharing is used, but the current approach was easier to implement... */
    /* We always support decibel volumes in software, otherwise we leave it to
     * the source implementor to set this flag as needed.
     *
     * Note: This flag can also change over the life time of the source. */
    if (!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
        pa_source_enable_decibel_volume(s, true);
        s->soft_volume = s->reference_volume;

    /* If the source implementor supports dB volumes itself, we should always
     * try and enable flat volumes too */
    if ((s->flags & PA_SOURCE_DECIBEL_VOLUME))
        enable_flat_volume(s, true);

    if (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) {
        pa_source *root_source = pa_source_get_master(s);

        pa_assert(PA_LIKELY(root_source));

        s->reference_volume = root_source->reference_volume;
        pa_cvolume_remap(&s->reference_volume, &root_source->channel_map, &s->channel_map);

        s->real_volume = root_source->real_volume;
        pa_cvolume_remap(&s->real_volume, &root_source->channel_map, &s->channel_map);
        /* We assume that if the source implementor changed the default
         * volume he did so in real_volume, because that is the usual
         * place where he is supposed to place his changes. */
        s->reference_volume = s->real_volume;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);

    pa_assert((s->flags & PA_SOURCE_HW_VOLUME_CTRL)
              || (s->base_volume == PA_VOLUME_NORM
                  && ((s->flags & PA_SOURCE_DECIBEL_VOLUME || (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)))));
    pa_assert(!(s->flags & PA_SOURCE_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SOURCE_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));

    if (s->suspend_cause)
        pa_assert_se(source_set_state(s, PA_SOURCE_SUSPENDED, s->suspend_cause) == 0);
        pa_assert_se(source_set_state(s, PA_SOURCE_IDLE, 0) == 0);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PUT], s);

    /* This function must be called after the PA_CORE_HOOK_SOURCE_PUT hook,
     * because module-switch-on-connect needs to know the old default source */
    pa_core_update_default_source(s->core);
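
/*
 * Illustrative sketch of the overall lifecycle that pa_source_put() expects,
 * roughly as a driver module would do it. u->thread_mq and u->rtpoll are
 * assumed to be set up by the module, and the example_* names are
 * placeholders:
 *
 *     s = pa_source_new(m->core, &data, flags);
 *     pa_source_new_data_done(&data);
 *
 *     s->userdata = u;
 *     s->parent.process_msg = example_process_msg;
 *     s->set_state_in_io_thread = example_set_state_in_io_thread;
 *
 *     pa_source_set_asyncmsgq(s, u->thread_mq.inq);
 *     pa_source_set_rtpoll(s, u->rtpoll);
 *
 *     ... start the IO thread ...
 *
 *     pa_source_put(s);
 *
 * In particular s->asyncmsgq must be valid before _put(), which is what the
 * assertion at the top of pa_source_put() checks.
 */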

/* Called from main context */
void pa_source_unlink(pa_source *s) {
    pa_source_output *o, PA_UNUSED *j = NULL;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    /* See pa_sink_unlink() for a couple of comments on how this function
     * works. */
    if (s->unlink_requested)

    s->unlink_requested = true;

    linked = PA_SOURCE_IS_LINKED(s->state);

        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], s);

    if (s->state != PA_SOURCE_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sources, s, NULL);

    pa_core_update_default_source(s->core);

        pa_idxset_remove_by_data(s->card->sources, s, NULL);

    while ((o = pa_idxset_first(s->outputs, NULL))) {
        pa_source_output_kill(o);

        source_set_state(s, PA_SOURCE_UNLINKED, 0);
        s->state = PA_SOURCE_UNLINKED;

        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK_POST], s);

/* Called from main context */
static void source_free(pa_object *o) {
    pa_source *s = PA_SOURCE(o);

    pa_assert_ctl_context();
    pa_assert(pa_source_refcnt(s) == 0);
    pa_assert(!PA_SOURCE_IS_LINKED(s->state));

    pa_log_info("Freeing source %u \"%s\"", s->index, s->name);

    pa_source_volume_change_flush(s);

    pa_idxset_free(s->outputs, NULL);
    pa_hashmap_free(s->thread_info.outputs);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_proplist_free(s->proplist);

        pa_hashmap_free(s->ports);

/* Called from main context, and not while the IO thread is active, please */
void pa_source_set_asyncmsgq(pa_source *s, pa_asyncmsgq *q) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

/* Called from main context, and not while the IO thread is active, please */
void pa_source_update_flags(pa_source *s, pa_source_flags_t mask, pa_source_flags_t value) {
    pa_source_flags_t old_flags;
    pa_source_output *output;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    /* For now, allow only a minimal set of flags to be changed. */
    pa_assert((mask & ~(PA_SOURCE_DYNAMIC_LATENCY|PA_SOURCE_LATENCY)) == 0);

    old_flags = s->flags;
    s->flags = (s->flags & ~mask) | (value & mask);

    if (s->flags == old_flags)

    if ((s->flags & PA_SOURCE_LATENCY) != (old_flags & PA_SOURCE_LATENCY))
        pa_log_debug("Source %s: LATENCY flag %s.", s->name, (s->flags & PA_SOURCE_LATENCY) ? "enabled" : "disabled");

    if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY) != (old_flags & PA_SOURCE_DYNAMIC_LATENCY))
        pa_log_debug("Source %s: DYNAMIC_LATENCY flag %s.",
                     s->name, (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ? "enabled" : "disabled");

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_FLAGS_CHANGED], s);

    PA_IDXSET_FOREACH(output, s->outputs, idx) {
        if (output->destination_source)
            pa_source_update_flags(output->destination_source, mask, value);

/* Called from IO context, or before _put() from main context */
void pa_source_set_rtpoll(pa_source *s, pa_rtpoll *p) {
    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);

    s->thread_info.rtpoll = p;

/* Called from main context */
int pa_source_update_status(pa_source*s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if (s->state == PA_SOURCE_SUSPENDED)

    return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE, 0);

/* Called from main context */
int pa_source_suspend(pa_source *s, bool suspend, pa_suspend_cause_t cause) {
    pa_suspend_cause_t merged_cause;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (s->monitor_of && cause != PA_SUSPEND_PASSTHROUGH)
        return -PA_ERR_NOTSUPPORTED;

        merged_cause = s->suspend_cause | cause;
        merged_cause = s->suspend_cause & ~cause;

        return source_set_state(s, PA_SOURCE_SUSPENDED, merged_cause);
        return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE, 0);
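
/*
 * Because the suspend cause is a bitmask, suspend/resume requests with
 * different causes compose. A worked example (any pa_suspend_cause_t bits
 * would do):
 *
 *     pa_source_suspend(s, true,  PA_SUSPEND_USER);      suspend_cause = USER
 *     pa_source_suspend(s, true,  PA_SUSPEND_INTERNAL);  suspend_cause = USER|INTERNAL
 *     pa_source_suspend(s, false, PA_SUSPEND_USER);      suspend_cause = INTERNAL, still suspended
 *     pa_source_suspend(s, false, PA_SUSPEND_INTERNAL);  suspend_cause = 0, source returns to RUNNING/IDLE
 */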

/* Called from main context */
int pa_source_sync_suspend(pa_source *s) {
    pa_sink_state_t state;
    pa_suspend_cause_t suspend_cause;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(s->monitor_of);

    state = s->monitor_of->state;
    suspend_cause = s->monitor_of->suspend_cause;

    /* The monitor source usually has the same state and suspend cause as the
     * sink, the only exception is when the monitor source is suspended due to
     * the sink being in the passthrough mode. If the monitor currently has the
     * PASSTHROUGH suspend cause, then we have to keep the monitor suspended
     * even if the sink is running. */
    if (s->suspend_cause & PA_SUSPEND_PASSTHROUGH)
        suspend_cause |= PA_SUSPEND_PASSTHROUGH;

    if (state == PA_SINK_SUSPENDED || suspend_cause)
        return source_set_state(s, PA_SOURCE_SUSPENDED, suspend_cause);

    pa_assert(PA_SINK_IS_OPENED(state));

    return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE, 0);

/* Called from main context */
pa_queue *pa_source_move_all_start(pa_source *s, pa_queue *q) {
    pa_source_output *o, *n;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    for (o = PA_SOURCE_OUTPUT(pa_idxset_first(s->outputs, &idx)); o; o = n) {
        n = PA_SOURCE_OUTPUT(pa_idxset_next(s->outputs, &idx));

        pa_source_output_ref(o);

        if (pa_source_output_start_move(o) >= 0)
            pa_source_output_unref(o);

/* Called from main context */
void pa_source_move_all_finish(pa_source *s, pa_queue *q, bool save) {

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
        if (PA_SOURCE_OUTPUT_IS_LINKED(o->state)) {
            if (pa_source_output_finish_move(o, s, save) < 0)
                pa_source_output_fail_move(o);

        pa_source_output_unref(o);

    pa_queue_free(q, NULL);

/* Called from main context */
void pa_source_move_all_fail(pa_queue *q) {

    pa_assert_ctl_context();

    while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
        pa_source_output_fail_move(o);
        pa_source_output_unref(o);

    pa_queue_free(q, NULL);
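
/*
 * Illustrative sketch of how the three helpers above fit together, e.g. in a
 * module that replaces one source with another (old_source/new_source are
 * placeholders for whatever the caller manages):
 *
 *     pa_queue *q;
 *
 *     q = pa_source_move_all_start(old_source, NULL);
 *
 *     ... create new_source ...
 *
 *     if (new_source)
 *         pa_source_move_all_finish(new_source, q, false);
 *     else
 *         pa_source_move_all_fail(q);
 */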

/* Called from IO thread context */
void pa_source_process_rewind(pa_source *s, size_t nbytes) {

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)

    pa_log_debug("Processing rewind...");

    PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
        pa_source_output_assert_ref(o);
        pa_source_output_process_rewind(o, nbytes);

/* Called from IO thread context */
void pa_source_post(pa_source*s, const pa_memchunk *chunk) {

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)

    if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
        pa_memchunk vchunk = *chunk;

        pa_memblock_ref(vchunk.memblock);
        pa_memchunk_make_writable(&vchunk, 0);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
            pa_silence_memchunk(&vchunk, &s->sample_spec);
            pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);

        while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
            pa_source_output_assert_ref(o);

            if (!o->thread_info.direct_on_input)
                pa_source_output_push(o, &vchunk);

        pa_memblock_unref(vchunk.memblock);

        while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
            pa_source_output_assert_ref(o);

            if (!o->thread_info.direct_on_input)
                pa_source_output_push(o, chunk);

/* Called from IO thread context */
void pa_source_post_direct(pa_source*s, pa_source_output *o, const pa_memchunk *chunk) {
    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
    pa_source_output_assert_ref(o);
    pa_assert(o->thread_info.direct_on_input);

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)

    if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
        pa_memchunk vchunk = *chunk;

        pa_memblock_ref(vchunk.memblock);
        pa_memchunk_make_writable(&vchunk, 0);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
            pa_silence_memchunk(&vchunk, &s->sample_spec);
            pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);

        pa_source_output_push(o, &vchunk);

        pa_memblock_unref(vchunk.memblock);
        pa_source_output_push(o, chunk);
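
/*
 * Illustrative sketch of the producer side: a backend's IO thread wraps
 * freshly captured audio in a pa_memchunk and hands it to pa_source_post(),
 * which applies soft volume/mute and distributes it to the connected outputs
 * as implemented above. read_from_device(), "u" and block_size are made-up
 * placeholders:
 *
 *     pa_memchunk chunk;
 *     void *p;
 *
 *     chunk.memblock = pa_memblock_new(s->core->mempool, block_size);
 *     chunk.index = 0;
 *
 *     p = pa_memblock_acquire(chunk.memblock);
 *     chunk.length = read_from_device(u, p, block_size);
 *     pa_memblock_release(chunk.memblock);
 *
 *     pa_source_post(s, &chunk);
 *     pa_memblock_unref(chunk.memblock);
 */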

/* Called from main thread */
void pa_source_reconfigure(pa_source *s, pa_sample_spec *spec, bool passthrough) {

    pa_source_output *o;
    pa_sample_spec desired_spec;
    uint32_t default_rate = s->default_sample_rate;
    uint32_t alternate_rate = s->alternate_sample_rate;
    bool default_rate_is_usable = false;
    bool alternate_rate_is_usable = false;
    bool avoid_resampling = s->avoid_resampling;

    if (pa_sample_spec_equal(spec, &s->sample_spec))

    if (!s->reconfigure && !s->monitor_of)

    if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
        pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");

    if (PA_SOURCE_IS_RUNNING(s->state)) {
        pa_log_info("Cannot update sample spec, SOURCE_IS_RUNNING, will keep using %s and %u Hz",
                    pa_sample_format_to_string(s->sample_spec.format), s->sample_spec.rate);

    if (s->monitor_of) {
        if (PA_SINK_IS_RUNNING(s->monitor_of->state)) {
            pa_log_info("Cannot update sample spec, this is a monitor source and the sink is running.");

    if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))

    desired_spec = s->sample_spec;

        /* We have to try to use the source output format and rate */
        desired_spec.format = spec->format;
        desired_spec.rate = spec->rate;

    } else if (avoid_resampling) {
        /* We just try to set the source output's sample rate if it's not too low */
        if (spec->rate >= default_rate || spec->rate >= alternate_rate)
            desired_spec.rate = spec->rate;
        desired_spec.format = spec->format;

    } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
        /* We can directly try to use this rate */
        desired_spec.rate = spec->rate;

    if (desired_spec.rate != spec->rate) {
        /* See if we can pick a rate that results in less resampling effort */
        if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
            default_rate_is_usable = true;
        if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
            default_rate_is_usable = true;
        if (alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
            alternate_rate_is_usable = true;
        if (alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
            alternate_rate_is_usable = true;

        if (alternate_rate_is_usable && !default_rate_is_usable)
            desired_spec.rate = alternate_rate;
            desired_spec.rate = default_rate;

    if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_source_is_passthrough(s))

    if (!passthrough && pa_source_used_by(s) > 0)

    pa_log_debug("Suspending source %s due to changing format, desired format = %s rate = %u",
                 s->name, pa_sample_format_to_string(desired_spec.format), desired_spec.rate);
    pa_source_suspend(s, true, PA_SUSPEND_INTERNAL);

        s->reconfigure(s, &desired_spec, passthrough);

        /* This is a monitor source. */

        /* XXX: This code is written with non-passthrough streams in mind. I
         * have no idea whether the behaviour with passthrough streams is
         * sensible. */
            s->sample_spec = desired_spec;
            pa_sink_reconfigure(s->monitor_of, &desired_spec, false);
            s->sample_spec = s->monitor_of->sample_spec;

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (o->state == PA_SOURCE_OUTPUT_CORKED)
            pa_source_output_update_resampler(o);

    pa_log_info("Reconfigured successfully");

    pa_source_suspend(s, false, PA_SUSPEND_INTERNAL);
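
/*
 * Worked example of the rate selection above, assuming default_rate = 48000,
 * alternate_rate = 44100, and neither passthrough nor avoid_resampling:
 *
 *   - a stream at 48000 or 44100 Hz is used directly (exact match);
 *   - a stream at 96000 Hz: 96000 % 4000 == 0 and 48000 % 4000 == 0, so the
 *     default rate is usable and 48000 is chosen;
 *   - a stream at 22050 Hz: 22050 % 11025 == 0 and 44100 % 11025 == 0, while
 *     22050 % 4000 != 0 rules the default rate out, so the alternate rate
 *     44100 is chosen to keep the resampling ratio simple;
 *   - otherwise the default rate wins.
 */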

/* Called from main thread */
pa_usec_t pa_source_get_latency(pa_source *s) {

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if (s->state == PA_SOURCE_SUSPENDED)

    if (!(s->flags & PA_SOURCE_LATENCY))

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);

    /* The return value is unsigned, so check that the offset can be added to usec without
     * underflowing. */
    if (-s->port_latency_offset <= usec)
        usec += s->port_latency_offset;

    return (pa_usec_t)usec;
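
/*
 * A small worked example of the check above: with a measured latency of
 * 1000 us and a port latency offset of -3000 us, adding the offset would
 * make the unsigned return value wrap around, so the offset is left out to
 * keep the result non-negative.
 */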

/* Called from IO thread */
int64_t pa_source_get_latency_within_thread(pa_source *s, bool allow_negative) {

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)

    if (!(s->flags & PA_SOURCE_LATENCY))

    o = PA_MSGOBJECT(s);

    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */

    o->process_msg(o, PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL);
    /* If allow_negative is false, the call should only return positive values, */
    usec += s->thread_info.port_latency_offset;
    if (!allow_negative && usec < 0)

/* Called from the main thread (and also from the IO thread while the main
 * thread is waiting).
 *
 * When a source uses volume sharing, it never has the PA_SOURCE_FLAT_VOLUME flag
 * set. Instead, flat volume mode is detected by checking whether the root source
 * has the flag set. */
bool pa_source_flat_volume_enabled(pa_source *s) {
    pa_source_assert_ref(s);

    s = pa_source_get_master(s);

        return (s->flags & PA_SOURCE_FLAT_VOLUME);

/* Called from the main thread (and also from the IO thread while the main
 * thread is waiting). */
pa_source *pa_source_get_master(pa_source *s) {
    pa_source_assert_ref(s);

    while (s && (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
        if (PA_UNLIKELY(!s->output_from_master))

        s = s->output_from_master->source;

/* Called from main context */
bool pa_source_is_filter(pa_source *s) {
    pa_source_assert_ref(s);

    return (s->output_from_master != NULL);

/* Called from main context */
bool pa_source_is_passthrough(pa_source *s) {

    pa_source_assert_ref(s);

    /* NB Currently only monitor sources support passthrough mode */
    return (s->monitor_of && pa_sink_is_passthrough(s->monitor_of));

/* Called from main context */
void pa_source_enter_passthrough(pa_source *s) {

    /* set the volume to NORM */
    s->saved_volume = *pa_source_get_volume(s, true);
    s->saved_save_volume = s->save_volume;

    pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
    pa_source_set_volume(s, &volume, true, false);

/* Called from main context */
void pa_source_leave_passthrough(pa_source *s) {
    /* Restore source volume to what it was before we entered passthrough mode */
    pa_source_set_volume(s, &s->saved_volume, true, s->saved_save_volume);

    pa_cvolume_init(&s->saved_volume);
    s->saved_save_volume = false;

/* Called from main context. */
static void compute_reference_ratio(pa_source_output *o) {

    pa_cvolume remapped;

    pa_assert(pa_source_flat_volume_enabled(o->source));

     * Calculates the reference ratio from the source's reference
     * volume. This basically calculates:
     *
     * o->reference_ratio = o->volume / o->source->reference_volume

    remapped = o->source->reference_volume;
    pa_cvolume_remap(&remapped, &o->source->channel_map, &o->channel_map);

    ratio = o->reference_ratio;

    for (c = 0; c < o->sample_spec.channels; c++) {

        /* We don't update when the source volume is 0 anyway */
        if (remapped.values[c] <= PA_VOLUME_MUTED)

        /* Don't update the reference ratio unless necessary */
        if (pa_sw_volume_multiply(
                remapped.values[c]) == o->volume.values[c])

        ratio.values[c] = pa_sw_volume_divide(
                o->volume.values[c],
                remapped.values[c]);

    pa_source_output_set_reference_ratio(o, &ratio);
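
/*
 * Worked example for the ratio above, in dB terms (software volumes compose
 * multiplicatively, so ratios subtract in dB): if the source's reference
 * volume on a channel sits at -10 dB and the output's volume on that channel
 * at -16 dB, the stored reference ratio is -6 dB, i.e. roughly
 *
 *     ratio.values[c] = pa_sw_volume_divide(pa_sw_volume_from_dB(-16.0),
 *                                           pa_sw_volume_from_dB(-10.0));
 *
 * which equals pa_sw_volume_from_dB(-6.0) up to rounding.
 */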

/* Called from main context. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void compute_reference_ratios(pa_source *s) {

    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        compute_reference_ratio(o);

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
                && PA_SOURCE_IS_LINKED(o->destination_source->state))
            compute_reference_ratios(o->destination_source);

/* Called from main context. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void compute_real_ratios(pa_source *s) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {

        pa_cvolume remapped;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin source uses volume sharing, so this output's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result o->soft_volume must equal o->volume_factor. */
            pa_cvolume_reset(&o->real_ratio, o->real_ratio.channels);
            o->soft_volume = o->volume_factor;

            if (PA_SOURCE_IS_LINKED(o->destination_source->state))
                compute_real_ratios(o->destination_source);

        /*
         * This basically calculates:
         *
         * o->real_ratio := o->volume / s->real_volume
         * o->soft_volume := o->real_ratio * o->volume_factor
         */

        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &o->channel_map);

        o->real_ratio.channels = o->sample_spec.channels;
        o->soft_volume.channels = o->sample_spec.channels;

        for (c = 0; c < o->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave o->real_ratio untouched */
                o->soft_volume.values[c] = PA_VOLUME_MUTED;

            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                    o->real_ratio.values[c],
                    remapped.values[c]) != o->volume.values[c])

                o->real_ratio.values[c] = pa_sw_volume_divide(
                        o->volume.values[c],
                        remapped.values[c]);

            o->soft_volume.values[c] = pa_sw_volume_multiply(
                    o->real_ratio.values[c],
                    o->volume_factor.values[c]);

    /* We don't copy the soft_volume to the thread_info data
     * here. That must be done by the caller */
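
/*
 * Continuing the example from compute_reference_ratio(): in flat volume mode,
 * if the source's real volume on a channel is -10 dB, a stream volume of
 * -16 dB gives a real ratio of -6 dB, and with a volume factor of 0 dB the
 * soft volume applied to that stream's samples is -6 dB. The hardware then
 * contributes the remaining -10 dB, so the stream ends up at -16 dB overall.
 */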

static pa_cvolume *cvolume_remap_minimal_impact(
        const pa_cvolume *template,
        const pa_channel_map *from,
        const pa_channel_map *to) {

    pa_assert(template);
    pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
    pa_assert(pa_cvolume_compatible_with_channel_map(template, to));

    /* Much like pa_cvolume_remap(), but tries to minimize impact when
     * mapping from source output to source volumes:
     *
     * If template is a possible remapping from v it is used instead
     * of remapping anew.
     *
     * If the channel maps don't match we set an all-channel volume on
     * the source to ensure that changing a volume on one stream has no
     * effect that cannot be compensated for in another stream that
     * does not have the same channel map as the source. */

    if (pa_channel_map_equal(from, to))

    if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {

    pa_cvolume_set(v, to->channels, pa_cvolume_max(v));

/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void get_maximum_output_volume(pa_source *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert(max_volume);
    pa_assert(channel_map);
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume remapped;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            if (PA_SOURCE_IS_LINKED(o->destination_source->state))
                get_maximum_output_volume(o->destination_source, max_volume, channel_map);

            /* Ignore this output. The origin source uses volume sharing, so this
             * output's volume will be set to be equal to the root source's real
             * volume. Obviously this output's current volume must not then
             * affect what the root source's real volume will be. */

        remapped = o->volume;
        cvolume_remap_minimal_impact(&remapped, max_volume, &o->channel_map, channel_map);
        pa_cvolume_merge(max_volume, max_volume, &remapped);

/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static bool has_outputs(pa_source *s) {
    pa_source_output *o;

    pa_source_assert_ref(s);

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (!o->destination_source || !(o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || has_outputs(o->destination_source))

/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void update_real_volume(pa_source *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);

    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_source_flat_volume_enabled(s)) {
                pa_cvolume new_output_volume;

                /* Follow the root source's real volume. */
                new_output_volume = *new_volume;
                pa_cvolume_remap(&new_output_volume, channel_map, &o->channel_map);
                pa_source_output_set_volume_direct(o, &new_output_volume);

                compute_reference_ratio(o);

            if (PA_SOURCE_IS_LINKED(o->destination_source->state))
                update_real_volume(o->destination_source, new_volume, channel_map);

/* Called from main thread. Only called for the root source in shared volume
 * cases, except for internal recursive calls. */
static void compute_real_volume(pa_source *s) {

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (!has_outputs(s)) {
        /* In the special case that we have no source outputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);

    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all outputs
     * connected to this source */
    get_maximum_output_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);

    /* Then, let's update the real ratios/soft volumes of all outputs
     * connected to this source */
    compute_real_ratios(s);

/* Called from main thread. Only called for the root source in shared volume
 * cases, except for internal recursive calls. */
static void propagate_reference_volume(pa_source *s) {

    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    /* This is called whenever the source volume changes for a reason other
     * than a source output volume change. We need to fix up the source
     * output volumes accordingly. */

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume new_volume;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            if (PA_SOURCE_IS_LINKED(o->destination_source->state))
                propagate_reference_volume(o->destination_source);

            /* Since the origin source uses volume sharing, this output's volume
             * needs to be updated to match the root source's real volume, but
             * that will be done later in update_real_volume(). */

        /* This basically calculates:
         *
         * o->volume := o->reference_volume * o->reference_ratio */

        new_volume = s->reference_volume;
        pa_cvolume_remap(&new_volume, &s->channel_map, &o->channel_map);
        pa_sw_cvolume_multiply(&new_volume, &new_volume, &o->reference_ratio);
        pa_source_output_set_volume_direct(o, &new_volume);

/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed. */
static bool update_reference_volume(pa_source *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {

    bool reference_volume_changed;
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));

    pa_cvolume_remap(&volume, channel_map, &s->channel_map);

    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    pa_source_set_reference_volume_direct(s, &volume);

    s->save_volume = (!reference_volume_changed && s->save_volume) || save;

    if (!reference_volume_changed && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        /* If the root source's volume doesn't change, then there can't be any
         * changes in the other sources in the source tree either.
         *
         * It's probably theoretically possible that even if the root source's
         * volume changes slightly, some filter source doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root source volume to the sources connected to the
         * intermediate source that didn't change its volume. This theoretical
         * possibility is the reason why we have that !(s->flags &
         * PA_SOURCE_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we always returned false here when
         * reference_volume_changed is false. */

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
                && PA_SOURCE_IS_LINKED(o->destination_source->state))
            update_reference_volume(o->destination_source, v, channel_map, false);

/* Called from main thread */
void pa_source_set_volume(
        const pa_cvolume *volume,

    pa_cvolume new_reference_volume, root_real_volume;
    pa_source *root_source;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_source_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* make sure we don't change the volume in PASSTHROUGH mode ...
     * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
    if (pa_source_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
        pa_log_warn("Cannot change volume, source is monitor of a PASSTHROUGH sink");

    /* In case of volume sharing, the volume is set for the root source first,
     * from which it's then propagated to the sharing sources. */
    root_source = pa_source_get_master(s);

    if (PA_UNLIKELY(!root_source))

    /* As a special exception we accept mono volumes on all sources --
     * even on those with more complex channel maps */

        if (pa_cvolume_compatible(volume, &s->sample_spec))
            new_reference_volume = *volume;
            new_reference_volume = s->reference_volume;
            pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));

        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);

        if (update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save)) {
            if (pa_source_flat_volume_enabled(root_source)) {
                /* OK, propagate this volume change back to the outputs */
                propagate_reference_volume(root_source);

                /* And now recalculate the real volume */
                compute_real_volume(root_source);

                update_real_volume(root_source, &root_source->reference_volume, &root_source->channel_map);

        /* If volume is NULL we synchronize the source's real and
         * reference volumes with the stream volumes. */

        pa_assert(pa_source_flat_volume_enabled(root_source));

        /* Ok, let's determine the new real volume */
        compute_real_volume(root_source);

        /* To propagate the reference volume from the filter to the root source,
         * we first take the real volume from the root source and remap it to
         * match the filter. Then, we merge in the reference volume from the
         * filter on top of this, and remap it back to the root source channel
         * map. */

        root_real_volume = root_source->real_volume;
        /* First we remap root's real volume to filter channel count and map if needed */
        if (s != root_source && !pa_channel_map_equal(&s->channel_map, &root_source->channel_map))
            pa_cvolume_remap(&root_real_volume, &root_source->channel_map, &s->channel_map);
        /* Then let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_real_volume);
        /* If the source and its root don't have the same number of channels, we need to remap back */
        if (s != root_source && !pa_channel_map_equal(&s->channel_map, &root_source->channel_map))
            pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);

        update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save);

        /* Now that the reference volume is updated, we can update the streams'
         * reference ratios. */
        compute_reference_ratios(root_source);

    if (root_source->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_source->soft_volume */

        pa_cvolume_reset(&root_source->soft_volume, root_source->sample_spec.channels);
        if (!(root_source->flags & PA_SOURCE_DEFERRED_VOLUME))
            root_source->set_volume(root_source);

        /* If we have no function set_volume(), then the soft volume
         * becomes the real volume */
        root_source->soft_volume = root_source->real_volume;

    /* This tells the source that soft volume and/or real volume changed */
        pa_assert_se(pa_asyncmsgq_send(root_source->asyncmsgq, PA_MSGOBJECT(root_source), PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
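
/*
 * Illustrative sketch of a typical caller (e.g. a policy or protocol module)
 * setting an absolute volume on a source, with the channel count taken from
 * the source's sample spec:
 *
 *     pa_cvolume v;
 *
 *     pa_cvolume_set(&v, s->sample_spec.channels, pa_sw_volume_from_dB(-6.0));
 *     pa_source_set_volume(s, &v, true, true);
 *
 * The two trailing bool arguments are assumed here to be the usual pair:
 * whether to notify the IO thread and whether the volume should be treated
 * as one worth saving by restore modules.
 */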

/* Called from the io thread if sync volume is used, otherwise from the main thread.
 * Only to be called by source implementor */
void pa_source_set_soft_volume(pa_source *s, const pa_cvolume *volume) {

    pa_source_assert_ref(s);
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
        pa_source_assert_io_context(s);
        pa_assert_ctl_context();

        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
        s->soft_volume = *volume;

    if (PA_SOURCE_IS_LINKED(s->state) && !(s->flags & PA_SOURCE_DEFERRED_VOLUME))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
        s->thread_info.soft_volume = s->soft_volume;

/* Called from the main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void propagate_real_volume(pa_source *s, const pa_cvolume *old_real_volume) {
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * o->real_ratio which should stay fixed. */

    if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, true);

    if (pa_source_flat_volume_enabled(s)) {

        PA_IDXSET_FOREACH(o, s->outputs, idx) {
            pa_cvolume new_volume;

            /* 2. Since the source's reference and real volumes are equal
             * now our ratios should be too. */
            pa_source_output_set_reference_ratio(o, &o->real_ratio);

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the source's reference volume.
             *
             * This basically calculates:
             *
             * o->volume = s->reference_volume * o->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            new_volume = s->reference_volume;
            pa_cvolume_remap(&new_volume, &s->channel_map, &o->channel_map);
            pa_sw_cvolume_multiply(&new_volume, &new_volume, &o->reference_ratio);
            pa_source_output_set_volume_direct(o, &new_volume);

            if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
                    && PA_SOURCE_IS_LINKED(o->destination_source->state))
                propagate_real_volume(o->destination_source, old_real_volume);

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        s->save_volume = true;

/* Called from io thread */
void pa_source_update_volume_and_mute(pa_source *s) {

    pa_source_assert_io_context(s);

    pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);

/* Called from main thread */
const pa_cvolume *pa_source_get_volume(pa_source *s, bool force_refresh) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_real_volume;

        pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

        old_real_volume = s->real_volume;

        if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume)

            pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        update_real_volume(s, &s->real_volume, &s->channel_map);
        propagate_real_volume(s, &old_real_volume);

    return &s->reference_volume;

/* Called from main thread. In volume sharing cases, only the root source may
 * call this function. */
void pa_source_volume_changed(pa_source *s, const pa_cvolume *new_real_volume) {
    pa_cvolume old_real_volume;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    /* The source implementor may call this if the volume changed to make sure everyone is notified */

    old_real_volume = s->real_volume;
    update_real_volume(s, new_real_volume, &s->channel_map);
    propagate_real_volume(s, &old_real_volume);
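
/*
 * Illustrative sketch: a backend that notices the hardware volume changed
 * behind PulseAudio's back (for example via a mixer event) reports it from
 * the main thread with pa_source_volume_changed(), which then updates the
 * reference volume and the stream volumes as implemented above.
 * read_hw_volume() and "u" are made-up placeholders; the helper is assumed
 * to fill a pa_cvolume in the source's channel map:
 *
 *     pa_cvolume v;
 *
 *     if (read_hw_volume(u, &v) >= 0)
 *         pa_source_volume_changed(s, &v);
 */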
1858 /* Called from main thread */
1859 void pa_source_set_mute(pa_source *s, bool mute, bool save) {
1862 pa_source_assert_ref(s);
1863 pa_assert_ctl_context();
1865 old_muted = s->muted;
1867 if (mute == old_muted) {
1868 s->save_muted |= save;
1873 s->save_muted = save;
1875 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->set_mute) {
1876 s->set_mute_in_progress = true;
1878 s->set_mute_in_progress = false;
1881 if (!PA_SOURCE_IS_LINKED(s->state))
1884 pa_log_debug("The mute of source %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
1885 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1886 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1887 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_MUTE_CHANGED], s);
1890 /* Called from main thread */
1891 bool pa_source_get_mute(pa_source *s, bool force_refresh) {
1893 pa_source_assert_ref(s);
1894 pa_assert_ctl_context();
1895 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1897 if ((s->refresh_muted || force_refresh) && s->get_mute) {
1900 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1901 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
1902 pa_source_mute_changed(s, mute);
1904 if (s->get_mute(s, &mute) >= 0)
1905 pa_source_mute_changed(s, mute);
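/* Usage sketch (illustrative, not part of the original file): toggling and saving the
 * mute state from the main thread could look like:
 *
 *     pa_source_set_mute(s, !pa_source_get_mute(s, false), true);
 */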
1912 /* Called from main thread */
1913 void pa_source_mute_changed(pa_source *s, bool new_muted) {
1914 pa_source_assert_ref(s);
1915 pa_assert_ctl_context();
1916 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1918 if (s->set_mute_in_progress)
1921 /* pa_source_set_mute() does this same check, so this may appear redundant,
1922 * but we must have this here also, because the save parameter of
1923 * pa_source_set_mute() would otherwise have unintended side effects
1924 * (saving the mute state when it shouldn't be saved). */
1925 if (new_muted == s->muted)
1928 pa_source_set_mute(s, new_muted, true);
1931 /* Called from main thread */
1932 bool pa_source_update_proplist(pa_source *s, pa_update_mode_t mode, pa_proplist *p) {
1933 pa_source_assert_ref(s);
1934 pa_assert_ctl_context();
1937 pa_proplist_update(s->proplist, mode, p);
1939 if (PA_SOURCE_IS_LINKED(s->state)) {
1940 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1941 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1947 /* Called from main thread */
1948 /* FIXME -- this should be dropped and merged into pa_source_update_proplist() */
1949 void pa_source_set_description(pa_source *s, const char *description) {
1951 pa_source_assert_ref(s);
1952 pa_assert_ctl_context();
1954 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1957 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1959 if (old && description && pa_streq(old, description))
1963 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1965 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1967 if (PA_SOURCE_IS_LINKED(s->state)) {
1968 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1969 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1973 /* Called from main thread */
1974 unsigned pa_source_linked_by(pa_source *s) {
1975 pa_source_assert_ref(s);
1976 pa_assert_ctl_context();
1977 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1979 return pa_idxset_size(s->outputs);
1982 /* Called from main thread */
1983 unsigned pa_source_used_by(pa_source *s) {
1986 pa_source_assert_ref(s);
1987 pa_assert_ctl_context();
1988 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1990 ret = pa_idxset_size(s->outputs);
1991 pa_assert(ret >= s->n_corked);
1993 return ret - s->n_corked;
1996 /* Called from main thread */
1997 unsigned pa_source_check_suspend(pa_source *s, pa_source_output *ignore) {
1999 pa_source_output *o;
2002 pa_source_assert_ref(s);
2003 pa_assert_ctl_context();
2005 if (!PA_SOURCE_IS_LINKED(s->state))
2010 PA_IDXSET_FOREACH(o, s->outputs, idx) {
2014 /* We do not assert here. It is perfectly valid for a source output to
2015 * be in the INIT state (i.e. created, marked done but not yet put)
2016 * and we should not care if it's unlinked as it won't contribute
2017 * towards our busy status.
2019 if (!PA_SOURCE_OUTPUT_IS_LINKED(o->state))
2022 if (o->state == PA_SOURCE_OUTPUT_CORKED)
2025 if (o->flags & PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND)
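/* Usage sketch (illustrative, not part of the original file): this is roughly what an
 * auto-suspend policy module would do on the main thread once the source is idle:
 *
 *     if (PA_SOURCE_IS_OPENED(s->state) && pa_source_check_suspend(s, NULL) == 0)
 *         pa_source_suspend(s, true, PA_SUSPEND_IDLE);
 */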
2034 const char *pa_source_state_to_string(pa_source_state_t state) {
2036 case PA_SOURCE_INIT: return "INIT";
2037 case PA_SOURCE_IDLE: return "IDLE";
2038 case PA_SOURCE_RUNNING: return "RUNNING";
2039 case PA_SOURCE_SUSPENDED: return "SUSPENDED";
2040 case PA_SOURCE_UNLINKED: return "UNLINKED";
2041 case PA_SOURCE_INVALID_STATE: return "INVALID_STATE";
2044 pa_assert_not_reached();
2047 /* Called from the IO thread */
2048 static void sync_output_volumes_within_thread(pa_source *s) {
2049 pa_source_output *o;
2052 pa_source_assert_ref(s);
2053 pa_source_assert_io_context(s);
2055 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
2056 if (pa_cvolume_equal(&o->thread_info.soft_volume, &o->soft_volume))
2059 o->thread_info.soft_volume = o->soft_volume;
2060 //pa_source_output_request_rewind(o, 0, true, false, false);
2064 /* Called from the IO thread. Only called for the root source in volume sharing
2065 * cases, except for internal recursive calls. */
2066 static void set_shared_volume_within_thread(pa_source *s) {
2067 pa_source_output *o;
2070 pa_source_assert_ref(s);
2072 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2074 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
2075 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
2076 set_shared_volume_within_thread(o->destination_source);
2080 /* Called from IO thread, except when it is not */
2081 int pa_source_process_msg(pa_msgobject *object, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2082 pa_source *s = PA_SOURCE(object);
2083 pa_source_assert_ref(s);
2085 switch ((pa_source_message_t) code) {
2087 case PA_SOURCE_MESSAGE_ADD_OUTPUT: {
2088 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
2090 pa_hashmap_put(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index), pa_source_output_ref(o));
2092 if (o->direct_on_input) {
2093 o->thread_info.direct_on_input = o->direct_on_input;
2094 pa_hashmap_put(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index), o);
2097 pa_source_output_attach(o);
2099 pa_source_output_set_state_within_thread(o, o->state);
2101 if (o->thread_info.requested_source_latency != (pa_usec_t) -1)
2102 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
2104 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2106 /* We don't just invalidate the requested latency here,
2107 * because if we are in a move we might need to fix up the
2108 * requested latency. */
2109 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
2111 /* In flat volume mode we need to update the volume as well. */
2113 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2116 case PA_SOURCE_MESSAGE_REMOVE_OUTPUT: {
2117 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
2119 pa_source_output_set_state_within_thread(o, o->state);
2121 pa_source_output_detach(o);
2123 if (o->thread_info.direct_on_input) {
2124 pa_hashmap_remove(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index));
2125 o->thread_info.direct_on_input = NULL;
2128 pa_hashmap_remove_and_free(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index));
2129 pa_source_invalidate_requested_latency(s, true);
2131 /* In flat volume mode we need to update the volume as well. */
2133 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2136 case PA_SOURCE_MESSAGE_SET_SHARED_VOLUME: {
2137 pa_source *root_source = pa_source_get_master(s);
2139 if (PA_LIKELY(root_source))
2140 set_shared_volume_within_thread(root_source);
2145 case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:
2147 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
2149 pa_source_volume_change_push(s);
2151 /* Fall through ... */
2153 case PA_SOURCE_MESSAGE_SET_VOLUME:
2155 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2156 s->thread_info.soft_volume = s->soft_volume;
2159 /* Fall through ... */
2161 case PA_SOURCE_MESSAGE_SYNC_VOLUMES:
2162 sync_output_volumes_within_thread(s);
2165 case PA_SOURCE_MESSAGE_GET_VOLUME:
2167 if ((s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume) {
2169 pa_source_volume_change_flush(s);
2170 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2173 /* In case the source implementor reset the SW volume. */
2174 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2175 s->thread_info.soft_volume = s->soft_volume;
2180 case PA_SOURCE_MESSAGE_SET_MUTE:
2182 if (s->thread_info.soft_muted != s->muted) {
2183 s->thread_info.soft_muted = s->muted;
2186 if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->set_mute)
2191 case PA_SOURCE_MESSAGE_GET_MUTE:
2193 if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->get_mute)
2194 return s->get_mute(s, userdata);
2198 case PA_SOURCE_MESSAGE_SET_STATE: {
2199 struct set_state_data *data = userdata;
2200 bool suspend_change =
2201 (s->thread_info.state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(data->state)) ||
2202 (PA_SOURCE_IS_OPENED(s->thread_info.state) && data->state == PA_SOURCE_SUSPENDED);
2204 if (s->set_state_in_io_thread) {
2207 if ((r = s->set_state_in_io_thread(s, data->state, data->suspend_cause)) < 0)
2211 s->thread_info.state = data->state;
2213 if (suspend_change) {
2214 pa_source_output *o;
2217 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2218 if (o->suspend_within_thread)
2219 o->suspend_within_thread(o, s->thread_info.state == PA_SOURCE_SUSPENDED);
2225 case PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY: {
2227 pa_usec_t *usec = userdata;
2228 *usec = pa_source_get_requested_latency_within_thread(s);
2230 /* Yes, that's right, the IO thread will see -1 when no
2231 * explicit requested latency is configured, while the main
2232 * thread will see max_latency */
2233 if (*usec == (pa_usec_t) -1)
2234 *usec = s->thread_info.max_latency;
2239 case PA_SOURCE_MESSAGE_SET_LATENCY_RANGE: {
2240 pa_usec_t *r = userdata;
2242 pa_source_set_latency_range_within_thread(s, r[0], r[1]);
2247 case PA_SOURCE_MESSAGE_GET_LATENCY_RANGE: {
2248 pa_usec_t *r = userdata;
2250 r[0] = s->thread_info.min_latency;
2251 r[1] = s->thread_info.max_latency;
2256 case PA_SOURCE_MESSAGE_GET_FIXED_LATENCY:
2258 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2261 case PA_SOURCE_MESSAGE_SET_FIXED_LATENCY:
2263 pa_source_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2266 case PA_SOURCE_MESSAGE_GET_MAX_REWIND:
2268 *((size_t*) userdata) = s->thread_info.max_rewind;
2271 case PA_SOURCE_MESSAGE_SET_MAX_REWIND:
2273 pa_source_set_max_rewind_within_thread(s, (size_t) offset);
2276 case PA_SOURCE_MESSAGE_GET_LATENCY:
2278 if (s->monitor_of) {
2279 *((int64_t*) userdata) = -pa_sink_get_latency_within_thread(s->monitor_of, true);
2283 /* Implementors need to override this implementation! */
2286 case PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2287 /* This message is sent from the IO thread and handled in the main thread. */
2288 pa_assert_ctl_context();
2290 /* Make sure we're not messing with the main thread when we're no longer linked. */
2291 if (!PA_SOURCE_IS_LINKED(s->state))
2294 pa_source_get_volume(s, true);
2295 pa_source_get_mute(s, true);
2298 case PA_SOURCE_MESSAGE_SET_PORT_LATENCY_OFFSET:
2299 s->thread_info.port_latency_offset = offset;
2302 case PA_SOURCE_MESSAGE_MAX:
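/* Implementor sketch (illustrative, not part of the original file): a source driver
 * typically installs its own handler and falls back to this default one for every
 * message it does not handle itself, e.g.:
 *
 *     static int my_process_msg(pa_msgobject *o, int code, void *data,
 *                               int64_t offset, pa_memchunk *chunk) {
 *         pa_source *s = PA_SOURCE(o);
 *
 *         if (code == PA_SOURCE_MESSAGE_GET_LATENCY) {
 *             *((int64_t*) data) = 0;  // report the device's real capture latency here
 *             return 0;
 *         }
 *
 *         return pa_source_process_msg(o, code, data, offset, chunk);
 *     }
 */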
2309 /* Called from main thread */
2310 int pa_source_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
2315 pa_core_assert_ref(c);
2316 pa_assert_ctl_context();
2317 pa_assert(cause != 0);
2319 for (source = PA_SOURCE(pa_idxset_first(c->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(c->sources, &idx))) {
2322 if (source->monitor_of)
2325 if ((r = pa_source_suspend(source, suspend, cause)) < 0)
2332 /* Called from IO thread */
2333 void pa_source_detach_within_thread(pa_source *s) {
2334 pa_source_output *o;
2337 pa_source_assert_ref(s);
2338 pa_source_assert_io_context(s);
2339 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2341 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2342 pa_source_output_detach(o);
2345 /* Called from IO thread */
2346 void pa_source_attach_within_thread(pa_source *s) {
2347 pa_source_output *o;
2350 pa_source_assert_ref(s);
2351 pa_source_assert_io_context(s);
2352 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2354 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2355 pa_source_output_attach(o);
2358 /* Called from IO thread */
2359 pa_usec_t pa_source_get_requested_latency_within_thread(pa_source *s) {
2360 pa_usec_t result = (pa_usec_t) -1;
2361 pa_source_output *o;
2364 pa_source_assert_ref(s);
2365 pa_source_assert_io_context(s);
2367 if (!(s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2368 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2370 if (s->thread_info.requested_latency_valid)
2371 return s->thread_info.requested_latency;
2373 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2374 if (o->thread_info.requested_source_latency != (pa_usec_t) -1 &&
2375 (result == (pa_usec_t) -1 || result > o->thread_info.requested_source_latency))
2376 result = o->thread_info.requested_source_latency;
2378 if (result != (pa_usec_t) -1)
2379 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2381 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2382 /* Only cache this if we are fully set up */
2383 s->thread_info.requested_latency = result;
2384 s->thread_info.requested_latency_valid = true;
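/* Worked example (illustrative, not from the original file): with two outputs
 * requesting 25 ms and 40 ms and a latency range of 10..200 ms, the loop above picks
 * the smallest request, so the source is asked to run at 25 ms. If no output requests
 * anything, -1 is returned (and translated to max_latency for the main thread by the
 * PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY handler). */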
2390 /* Called from main thread */
2391 pa_usec_t pa_source_get_requested_latency(pa_source *s) {
2394 pa_source_assert_ref(s);
2395 pa_assert_ctl_context();
2396 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2398 if (s->state == PA_SOURCE_SUSPENDED)
2401 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2406 /* Called from IO thread */
2407 void pa_source_set_max_rewind_within_thread(pa_source *s, size_t max_rewind) {
2408 pa_source_output *o;
2411 pa_source_assert_ref(s);
2412 pa_source_assert_io_context(s);
2414 if (max_rewind == s->thread_info.max_rewind)
2417 s->thread_info.max_rewind = max_rewind;
2419 if (PA_SOURCE_IS_LINKED(s->thread_info.state))
2420 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2421 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2424 /* Called from main thread */
2425 void pa_source_set_max_rewind(pa_source *s, size_t max_rewind) {
2426 pa_source_assert_ref(s);
2427 pa_assert_ctl_context();
2429 if (PA_SOURCE_IS_LINKED(s->state))
2430 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2432 pa_source_set_max_rewind_within_thread(s, max_rewind);
2435 /* Called from IO thread */
2436 void pa_source_invalidate_requested_latency(pa_source *s, bool dynamic) {
2437 pa_source_output *o;
2440 pa_source_assert_ref(s);
2441 pa_source_assert_io_context(s);
2443 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2444 s->thread_info.requested_latency_valid = false;
2448 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2450 if (s->update_requested_latency)
2451 s->update_requested_latency(s);
2453 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2454 if (o->update_source_requested_latency)
2455 o->update_source_requested_latency(o);
2459 pa_sink_invalidate_requested_latency(s->monitor_of, dynamic);
2462 /* Called from main thread */
2463 void pa_source_set_latency_range(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2464 pa_source_assert_ref(s);
2465 pa_assert_ctl_context();
2467 /* min_latency == 0: no limit
2468 * min_latency anything else: specified limit
2470 * Similar for max_latency */
2472 if (min_latency < ABSOLUTE_MIN_LATENCY)
2473 min_latency = ABSOLUTE_MIN_LATENCY;
2475 if (max_latency <= 0 ||
2476 max_latency > ABSOLUTE_MAX_LATENCY)
2477 max_latency = ABSOLUTE_MAX_LATENCY;
2479 pa_assert(min_latency <= max_latency);
2481 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2482 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2483 max_latency == ABSOLUTE_MAX_LATENCY) ||
2484 (s->flags & PA_SOURCE_DYNAMIC_LATENCY));
2486 if (PA_SOURCE_IS_LINKED(s->state)) {
2492 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2494 pa_source_set_latency_range_within_thread(s, min_latency, max_latency);
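/* Usage sketch (illustrative, not part of the original file; the numbers are made up):
 * a driver with PA_SOURCE_DYNAMIC_LATENCY would typically advertise its range before
 * pa_source_put(), e.g.:
 *
 *     pa_source_set_latency_range(s, 5 * PA_USEC_PER_MSEC, 2 * PA_USEC_PER_SEC);
 */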
2497 /* Called from main thread */
2498 void pa_source_get_latency_range(pa_source *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2499 pa_source_assert_ref(s);
2500 pa_assert_ctl_context();
2501 pa_assert(min_latency);
2502 pa_assert(max_latency);
2504 if (PA_SOURCE_IS_LINKED(s->state)) {
2505 pa_usec_t r[2] = { 0, 0 };
2507 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2509 *min_latency = r[0];
2510 *max_latency = r[1];
2512 *min_latency = s->thread_info.min_latency;
2513 *max_latency = s->thread_info.max_latency;
2517 /* Called from IO thread, and from main thread before pa_source_put() is called */
2518 void pa_source_set_latency_range_within_thread(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2519 pa_source_assert_ref(s);
2520 pa_source_assert_io_context(s);
2522 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2523 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2524 pa_assert(min_latency <= max_latency);
2526 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2527 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2528 max_latency == ABSOLUTE_MAX_LATENCY) ||
2529 (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ||
2532 if (s->thread_info.min_latency == min_latency &&
2533 s->thread_info.max_latency == max_latency)
2536 s->thread_info.min_latency = min_latency;
2537 s->thread_info.max_latency = max_latency;
2539 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2540 pa_source_output *o;
2543 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2544 if (o->update_source_latency_range)
2545 o->update_source_latency_range(o);
2548 pa_source_invalidate_requested_latency(s, false);
2551 /* Called from main thread, before the source is put */
2552 void pa_source_set_fixed_latency(pa_source *s, pa_usec_t latency) {
2553 pa_source_assert_ref(s);
2554 pa_assert_ctl_context();
2556 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2557 pa_assert(latency == 0);
2561 if (latency < ABSOLUTE_MIN_LATENCY)
2562 latency = ABSOLUTE_MIN_LATENCY;
2564 if (latency > ABSOLUTE_MAX_LATENCY)
2565 latency = ABSOLUTE_MAX_LATENCY;
2567 if (PA_SOURCE_IS_LINKED(s->state))
2568 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2570 s->thread_info.fixed_latency = latency;
2573 /* Called from main thread */
2574 pa_usec_t pa_source_get_fixed_latency(pa_source *s) {
2577 pa_source_assert_ref(s);
2578 pa_assert_ctl_context();
2580 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY)
2583 if (PA_SOURCE_IS_LINKED(s->state))
2584 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2586 latency = s->thread_info.fixed_latency;
2591 /* Called from IO thread */
2592 void pa_source_set_fixed_latency_within_thread(pa_source *s, pa_usec_t latency) {
2593 pa_source_assert_ref(s);
2594 pa_source_assert_io_context(s);
2596 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2597 pa_assert(latency == 0);
2598 s->thread_info.fixed_latency = 0;
2603 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2604 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2606 if (s->thread_info.fixed_latency == latency)
2609 s->thread_info.fixed_latency = latency;
2611 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2612 pa_source_output *o;
2615 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2616 if (o->update_source_fixed_latency)
2617 o->update_source_fixed_latency(o);
2620 pa_source_invalidate_requested_latency(s, false);
2623 /* Called from main thread */
2624 void pa_source_set_port_latency_offset(pa_source *s, int64_t offset) {
2625 pa_source_assert_ref(s);
2627 s->port_latency_offset = offset;
2629 if (PA_SOURCE_IS_LINKED(s->state))
2630 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
2632 s->thread_info.port_latency_offset = offset;
2634 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_LATENCY_OFFSET_CHANGED], s);
2637 /* Called from main thread */
2638 size_t pa_source_get_max_rewind(pa_source *s) {
2640 pa_assert_ctl_context();
2641 pa_source_assert_ref(s);
2643 if (!PA_SOURCE_IS_LINKED(s->state))
2644 return s->thread_info.max_rewind;
2646 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2651 /* Called from main context */
2652 int pa_source_set_port(pa_source *s, const char *name, bool save) {
2653 pa_device_port *port;
2655 pa_source_assert_ref(s);
2656 pa_assert_ctl_context();
2659 pa_log_debug("set_port() operation not implemented for source %u \"%s\"", s->index, s->name);
2660 return -PA_ERR_NOTIMPLEMENTED;
2664 return -PA_ERR_NOENTITY;
2666 if (!(port = pa_hashmap_get(s->ports, name)))
2667 return -PA_ERR_NOENTITY;
2669 if (s->active_port == port) {
2670 s->save_port = s->save_port || save;
2674 if (s->set_port(s, port) < 0)
2675 return -PA_ERR_NOENTITY;
2677 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2679 pa_log_info("Changed port of source %u \"%s\" to %s", s->index, s->name, port->name);
2681 s->active_port = port;
2682 s->save_port = save;
2684 /* The active port affects the default source selection. */
2685 pa_core_update_default_source(s->core);
2687 pa_source_set_port_latency_offset(s, s->active_port->latency_offset);
2689 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_CHANGED], s);
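/* Usage sketch (illustrative; the port name is hypothetical): switching the active
 * port from the main thread could look like:
 *
 *     if (pa_source_set_port(s, "analog-input-microphone", true) < 0)
 *         pa_log_warn("Failed to set port on %s", s->name);
 */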
2694 PA_STATIC_FLIST_DECLARE(pa_source_volume_change, 0, pa_xfree);
2696 /* Called from the IO thread. */
2697 static pa_source_volume_change *pa_source_volume_change_new(pa_source *s) {
2698 pa_source_volume_change *c;
2699 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_source_volume_change))))
2700 c = pa_xnew(pa_source_volume_change, 1);
2702 PA_LLIST_INIT(pa_source_volume_change, c);
2704 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
2708 /* Called from the IO thread. */
2709 static void pa_source_volume_change_free(pa_source_volume_change *c) {
2711 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_source_volume_change), c) < 0)
2715 /* Called from the IO thread. */
2716 void pa_source_volume_change_push(pa_source *s) {
2717 pa_source_volume_change *c = NULL;
2718 pa_source_volume_change *nc = NULL;
2719 pa_source_volume_change *pc = NULL;
2720 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
2722 const char *direction = NULL;
2725 nc = pa_source_volume_change_new(s);
2727 /* NOTE: There are already more different volumes in pa_source than I can remember.
2728 * Adding one more volume for HW would get rid of this, but I am trying
2729 * to survive with the ones we already have. */
2730 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
2732 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
2733 pa_log_debug("Volume not changing");
2734 pa_source_volume_change_free(nc);
2738 nc->at = pa_source_get_latency_within_thread(s, false);
2739 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
2741 if (s->thread_info.volume_changes_tail) {
2742 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
2743 /* If the volume is going up, let's do it a bit late. If it is going
2744 * down, let's do it a bit early. */
2745 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
2746 if (nc->at + safety_margin > c->at) {
2747 nc->at += safety_margin;
2752 else if (nc->at - safety_margin > c->at) {
2753 nc->at -= safety_margin;
2761 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
2762 nc->at += safety_margin;
2765 nc->at -= safety_margin;
2768 PA_LLIST_PREPEND(pa_source_volume_change, s->thread_info.volume_changes, nc);
2771 PA_LLIST_INSERT_AFTER(pa_source_volume_change, s->thread_info.volume_changes, c, nc);
2774 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
2776 /* We can ignore volume events that came earlier but should happen later than this. */
2777 PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
2778 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
2779 pa_source_volume_change_free(c);
2782 s->thread_info.volume_changes_tail = nc;
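/* Note (added for clarity): the scheduling above biases changes so that, within the
 * safety margin where the exact hardware timing is uncertain, the lower of the two
 * volumes is the one in effect. Both the safety margin and the extra delay are
 * driver-provided tunables (thread_info.volume_change_safety_margin and
 * thread_info.volume_change_extra_delay). */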
2785 /* Called from the IO thread. */
2786 static void pa_source_volume_change_flush(pa_source *s) {
2787 pa_source_volume_change *c = s->thread_info.volume_changes;
2789 s->thread_info.volume_changes = NULL;
2790 s->thread_info.volume_changes_tail = NULL;
2792 pa_source_volume_change *next = c->next;
2793 pa_source_volume_change_free(c);
2798 /* Called from the IO thread. */
2799 bool pa_source_volume_change_apply(pa_source *s, pa_usec_t *usec_to_next) {
2805 if (!s->thread_info.volume_changes || !PA_SOURCE_IS_LINKED(s->state)) {
2811 pa_assert(s->write_volume);
2813 now = pa_rtclock_now();
2815 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
2816 pa_source_volume_change *c = s->thread_info.volume_changes;
2817 PA_LLIST_REMOVE(pa_source_volume_change, s->thread_info.volume_changes, c);
2818 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
2819 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
2821 s->thread_info.current_hw_volume = c->hw_volume;
2822 pa_source_volume_change_free(c);
2828 if (s->thread_info.volume_changes) {
2830 *usec_to_next = s->thread_info.volume_changes->at - now;
2831 if (pa_log_ratelimit(PA_LOG_DEBUG))
2832 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
2837 s->thread_info.volume_changes_tail = NULL;
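/* Usage sketch (illustrative, not part of the original file): a driver using deferred
 * volume would call this from its IO thread loop and, if changes are still pending,
 * re-arm a timer for the next one, roughly:
 *
 *     pa_usec_t delay;
 *     if (pa_source_volume_change_apply(s, &delay)) {
 *         // changes remain; schedule another call in 'delay' usec
 *     }
 */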
2842 /* Called from the main thread */
2843 /* Gets the list of formats supported by the source. The members and idxset must
2844 * be freed by the caller. */
2845 pa_idxset* pa_source_get_formats(pa_source *s) {
2850 if (s->get_formats) {
2851 /* Source supports format query, all is good */
2852 ret = s->get_formats(s);
2854 /* Source doesn't support format query, so assume it does PCM */
2855 pa_format_info *f = pa_format_info_new();
2856 f->encoding = PA_ENCODING_PCM;
2858 ret = pa_idxset_new(NULL, NULL);
2859 pa_idxset_put(ret, f, NULL);
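/* Usage sketch (illustrative, not part of the original file): enumerating and then
 * releasing the returned set, as required by the comment above:
 *
 *     uint32_t idx;
 *     pa_format_info *f;
 *     pa_idxset *formats = pa_source_get_formats(s);
 *
 *     PA_IDXSET_FOREACH(f, formats, idx)
 *         pa_log_debug("Supported encoding: %s", pa_encoding_to_string(f->encoding));
 *
 *     pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
 */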
2865 /* Called from the main thread */
2866 /* Checks if the source can accept this format */
2867 bool pa_source_check_format(pa_source *s, pa_format_info *f) {
2868 pa_idxset *formats = NULL;
2874 formats = pa_source_get_formats(s);
2877 pa_format_info *finfo_device;
2880 PA_IDXSET_FOREACH(finfo_device, formats, i) {
2881 if (pa_format_info_is_compatible(finfo_device, f)) {
2887 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
2893 /* Called from the main thread */
2894 /* Calculates the intersection between formats supported by the source and
2895 * in_formats, and returns these, in the order of the source's formats. */
2896 pa_idxset* pa_source_check_formats(pa_source *s, pa_idxset *in_formats) {
2897 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *source_formats = NULL;
2898 pa_format_info *f_source, *f_in;
2903 if (!in_formats || pa_idxset_isempty(in_formats))
2906 source_formats = pa_source_get_formats(s);
2908 PA_IDXSET_FOREACH(f_source, source_formats, i) {
2909 PA_IDXSET_FOREACH(f_in, in_formats, j) {
2910 if (pa_format_info_is_compatible(f_source, f_in))
2911 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
2917 pa_idxset_free(source_formats, (pa_free_cb_t) pa_format_info_free);
2922 /* Called from the main thread */
2923 void pa_source_set_sample_format(pa_source *s, pa_sample_format_t format) {
2924 pa_sample_format_t old_format;
2927 pa_assert(pa_sample_format_valid(format));
2929 old_format = s->sample_spec.format;
2930 if (old_format == format)
2933 pa_log_info("%s: format: %s -> %s",
2934 s->name, pa_sample_format_to_string(old_format), pa_sample_format_to_string(format));
2936 s->sample_spec.format = format;
2938 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2941 /* Called from the main thread */
2942 void pa_source_set_sample_rate(pa_source *s, uint32_t rate) {
2946 pa_assert(pa_sample_rate_valid(rate));
2948 old_rate = s->sample_spec.rate;
2949 if (old_rate == rate)
2952 pa_log_info("%s: rate: %u -> %u", s->name, old_rate, rate);
2954 s->sample_spec.rate = rate;
2956 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2959 /* Called from the main thread. */
2960 void pa_source_set_reference_volume_direct(pa_source *s, const pa_cvolume *volume) {
2961 pa_cvolume old_volume;
2962 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
2963 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
2968 old_volume = s->reference_volume;
2970 if (pa_cvolume_equal(volume, &old_volume))
2973 s->reference_volume = *volume;
2974 pa_log_debug("The reference volume of source %s changed from %s to %s.", s->name,
2975 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
2976 s->flags & PA_SOURCE_DECIBEL_VOLUME),
2977 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
2978 s->flags & PA_SOURCE_DECIBEL_VOLUME));
2980 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2981 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_VOLUME_CHANGED], s);