2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
32 #include <pulse/introspect.h>
33 #include <pulse/format.h>
34 #include <pulse/utf8.h>
35 #include <pulse/xmalloc.h>
36 #include <pulse/timeval.h>
37 #include <pulse/util.h>
38 #include <pulse/rtclock.h>
39 #include <pulse/internal.h>
41 #include <pulsecore/i18n.h>
42 #include <pulsecore/sink-input.h>
43 #include <pulsecore/namereg.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/mix.h>
47 #include <pulsecore/core-subscribe.h>
48 #include <pulsecore/log.h>
49 #include <pulsecore/macro.h>
50 #include <pulsecore/play-memblockq.h>
51 #include <pulsecore/flist.h>
/* Mixing and latency tunables for the sink implementation.
 * NOTE(review): this copy of the file has the original line numbers fused
 * into the text and interior lines missing throughout — restore a clean
 * copy from upstream before building. */
/* Max number of sink inputs mixed in one pass. */
55 #define MAX_MIX_CHANNELS 32
/* One mix buffer spans a single memory page. */
56 #define MIX_BUFFER_LENGTH (pa_page_size())
/* Hard bounds on configurable sink latency — presumably in usec, matching
 * PA_USEC_PER_SEC below; TODO confirm unit of the 500. */
57 #define ABSOLUTE_MIN_LATENCY (500)
58 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
/* Latency used when the backend offers no dynamic latency. */
59 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* Declare pa_sink as a public message-object class. */
61 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
/* A queued deferred hardware-volume change, linked into a list on the sink.
 * NOTE(review): the struct's data members are missing from this copy. */
63 struct pa_sink_volume_change {
67 PA_LLIST_FIELDS(pa_sink_volume_change);
/* Message payload used when setting a port via the IO thread
 * (see pa_sink_suspend()). NOTE(review): members missing from this copy. */
70 struct sink_message_set_port {
/* Forward declarations of file-local helpers defined later. */
75 static void sink_free(pa_object *s);
77 static void pa_sink_volume_change_push(pa_sink *s);
78 static void pa_sink_volume_change_flush(pa_sink *s);
79 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/* Write the rendered PCM chunk to a debug dump file (TIZEN_PCM_DUMP).
 * Opens the dump file lazily when dumping is enabled for sinks and the sink
 * is RUNNING; closes it when the dump config bit is cleared; otherwise
 * appends the chunk bytes. NOTE(review): interior lines (declarations of
 * now/tm/datetime/ptr, closing braces, else branches) are missing from this
 * copy — restore before building. */
82 static void pa_sink_write_pcm_dump(pa_sink *s, pa_memchunk *chunk)
84 char *dump_time = NULL, *dump_path_surfix = NULL;
85 const char *s_device_api_str, *card_name_str, *device_idx_str;
90 /* open file for dump pcm */
91 if (s->core->pcm_dump & PA_PCM_DUMP_PA_SINK && !s->pcm_dump_fp && s->state == PA_SINK_RUNNING) {
92 pa_gettimeofday(&now);
93 localtime_r(&now.tv_sec, &tm);
94 memset(&datetime[0], 0x00, sizeof(datetime));
95 strftime(&datetime[0], sizeof(datetime), "%H%M%S", &tm);
/* Timestamp with millisecond suffix for the dump filename. */
96 dump_time = pa_sprintf_malloc("%s.%03ld", &datetime[0], now.tv_usec / 1000);
/* Filename suffix: ALSA card/device when available, else the device API
 * string, else the sink name. */
98 if ((s_device_api_str = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_API))) {
99 if (pa_streq(s_device_api_str, "alsa")) {
100 card_name_str = pa_proplist_gets(s->proplist, "alsa.card_name");
101 device_idx_str = pa_proplist_gets(s->proplist, "alsa.device");
102 dump_path_surfix = pa_sprintf_malloc("%s.%s", pa_strnull(card_name_str), pa_strnull(device_idx_str));
104 dump_path_surfix = pa_sprintf_malloc("%s", s_device_api_str);
107 dump_path_surfix = pa_sprintf_malloc("%s", s->name);
110 s->dump_path = pa_sprintf_malloc("%s_%s_pa-sink%d-%s_%dch_%d.raw", PA_PCM_DUMP_PATH_PREFIX, pa_strempty(dump_time),
111 s->index, pa_strempty(dump_path_surfix), s->sample_spec.channels, s->sample_spec.rate);
113 s->pcm_dump_fp = fopen(s->dump_path, "w");
115 pa_log_warn("%s open failed", s->dump_path);
117 pa_log_info("%s opened", s->dump_path);
120 pa_xfree(dump_path_surfix);
121 /* close file for dump pcm when config is changed */
122 } else if (~s->core->pcm_dump & PA_PCM_DUMP_PA_SINK && s->pcm_dump_fp) {
123 fclose(s->pcm_dump_fp);
124 pa_log_info("%s closed", s->dump_path);
125 pa_xfree(s->dump_path);
126 s->pcm_dump_fp = NULL;
/* Append the chunk's payload to the open dump file. */
130 if (s->pcm_dump_fp) {
133 ptr = pa_memblock_acquire(chunk->memblock);
135 fwrite((uint8_t *)ptr + chunk->index, 1, chunk->length, s->pcm_dump_fp);
137 pa_log_warn("pa_memblock_acquire is failed. ptr is NULL");
139 pa_memblock_release(chunk->memblock);
/* Initialize a pa_sink_new_data: fresh property list plus an empty,
 * string-keyed port hashmap that unrefs ports on removal.
 * NOTE(review): the zeroing of *data and the return statement are missing
 * from this copy. */
144 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
148 data->proplist = pa_proplist_new();
149 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
/* Replace the sink name in the new-data struct (frees any previous name). */
154 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
157 pa_xfree(data->name);
158 data->name = pa_xstrdup(name);
/* Set the sample spec; a NULL spec clears the is-set flag instead. */
161 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
164 if ((data->sample_spec_is_set = !!spec))
165 data->sample_spec = *spec;
/* Set the channel map; a NULL map clears the is-set flag instead. */
168 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
171 if ((data->channel_map_is_set = !!map))
172 data->channel_map = *map;
/* Record an alternate sample rate and mark it as explicitly set. */
175 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
178 data->alternate_sample_rate_is_set = true;
179 data->alternate_sample_rate = alternate_sample_rate;
/* Set the initial volume; a NULL volume clears the is-set flag instead. */
182 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
185 if ((data->volume_is_set = !!volume))
186 data->volume = *volume;
/* Record the initial mute state and mark it as explicitly set.
 * NOTE(review): the `data->muted = mute;` assignment line is missing from
 * this copy. */
189 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
192 data->muted_is_set = true;
/* Replace the requested active port name (frees any previous value). */
196 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
199 pa_xfree(data->active_port);
200 data->active_port = pa_xstrdup(port);
/* Release everything owned by a pa_sink_new_data: proplist, port map,
 * name and active-port strings. NOTE(review): the NULL guard around the
 * hashmap free appears to be missing from this copy. */
203 void pa_sink_new_data_done(pa_sink_new_data *data) {
206 pa_proplist_free(data->proplist);
209 pa_hashmap_free(data->ports);
211 pa_xfree(data->name);
212 pa_xfree(data->active_port);
215 /* Called from main context */
/* Clear every implementor-provided callback pointer on the sink, returning
 * it to a no-op state. NOTE(review): several callback resets (e.g.
 * set_state/set_mute/set_port) fall in lines missing from this copy. */
216 static void reset_callbacks(pa_sink *s) {
220 s->get_volume = NULL;
221 s->set_volume = NULL;
222 s->write_volume = NULL;
225 s->request_rewind = NULL;
226 s->update_requested_latency = NULL;
228 s->get_formats = NULL;
229 s->set_formats = NULL;
230 s->reconfigure = NULL;
233 /* Called from main context */
/* Create and register a new sink: validates the new-data, fires the NEW and
 * FIXATE hooks, initializes all sink fields and thread_info, and creates the
 * companion ".monitor" source. Returns the new sink, or NULL on validation /
 * hook failure. NOTE(review): many interior lines (local declarations,
 * closing braces, else branches, early returns) are missing from this copy. */
234 pa_sink* pa_sink_new(
236 pa_sink_new_data *data,
237 pa_sink_flags_t flags) {
241 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
242 pa_source_new_data source_data;
248 pa_assert(data->name);
249 pa_assert_ctl_context();
251 s = pa_msgobject_new(pa_sink);
/* Reserve the sink name in the name registry before anything else. */
253 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
254 pa_log_debug("Failed to register name %s.", data->name);
259 pa_sink_new_data_set_name(data, name);
261 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
263 pa_namereg_unregister(core, name);
267 /* FIXME, need to free s here on failure */
/* Validate the caller-provided data. */
269 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
270 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
272 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
274 if (!data->channel_map_is_set)
275 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
277 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
278 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
280 /* FIXME: There should probably be a general function for checking whether
281 * the sink volume is allowed to be set, like there is for sink inputs. */
282 pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
284 if (!data->volume_is_set) {
285 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
286 data->save_volume = false;
289 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
290 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
292 if (!data->muted_is_set)
/* Inherit card properties and fill in device description/icon/roles. */
296 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
298 pa_device_init_description(data->proplist, data->card);
299 pa_device_init_icon(data->proplist, true);
300 pa_device_init_intended_roles(data->proplist);
302 if (!data->active_port) {
303 pa_device_port *p = pa_device_port_find_best(data->ports);
305 pa_sink_new_data_set_port(data, p->name);
308 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
310 pa_namereg_unregister(core, name);
/* Copy validated data into the sink object proper. */
314 s->parent.parent.free = sink_free;
315 s->parent.process_msg = pa_sink_process_msg;
318 s->state = PA_SINK_INIT;
321 s->suspend_cause = data->suspend_cause;
322 pa_sink_set_mixer_dirty(s, false);
323 s->name = pa_xstrdup(name);
324 s->proplist = pa_proplist_copy(data->proplist);
325 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
326 s->module = data->module;
327 s->card = data->card;
329 s->priority = pa_device_init_priority(s->proplist);
331 s->sample_spec = data->sample_spec;
332 s->channel_map = data->channel_map;
333 s->default_sample_rate = s->sample_spec.rate;
335 if (data->alternate_sample_rate_is_set)
336 s->alternate_sample_rate = data->alternate_sample_rate;
338 s->alternate_sample_rate = s->core->alternate_sample_rate;
340 s->avoid_resampling = data->avoid_resampling;
343 s->inputs = pa_idxset_new(NULL, NULL);
345 s->input_to_master = NULL;
347 s->reference_volume = s->real_volume = data->volume;
348 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
349 s->base_volume = PA_VOLUME_NORM;
350 s->n_volume_steps = PA_VOLUME_NORM+1;
351 s->muted = data->muted;
352 s->refresh_volume = s->refresh_muted = false;
359 /* As a minor optimization we just steal the list instead of
361 s->ports = data->ports;
364 s->active_port = NULL;
365 s->save_port = false;
367 if (data->active_port)
368 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
369 s->save_port = data->save_port;
371 /* Hopefully the active port has already been assigned in the previous call
372 to pa_device_port_find_best, but better safe than sorry */
374 s->active_port = pa_device_port_find_best(s->ports);
377 s->port_latency_offset = s->active_port->latency_offset;
379 s->port_latency_offset = 0;
381 s->save_volume = data->save_volume;
382 s->save_muted = data->save_muted;
383 #ifdef TIZEN_PCM_DUMP
384 s->pcm_dump_fp = NULL;
388 pa_silence_memchunk_get(
389 &core->silence_cache,
/* IO-thread-side state, set up before the sink ever runs. */
395 s->thread_info.rtpoll = NULL;
396 s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
397 (pa_free_cb_t) pa_sink_input_unref);
398 s->thread_info.soft_volume = s->soft_volume;
399 s->thread_info.soft_muted = s->muted;
400 s->thread_info.state = s->state;
401 s->thread_info.rewind_nbytes = 0;
402 s->thread_info.rewind_requested = false;
403 s->thread_info.max_rewind = 0;
404 s->thread_info.max_request = 0;
405 s->thread_info.requested_latency_valid = false;
406 s->thread_info.requested_latency = 0;
407 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
408 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
409 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
411 PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
412 s->thread_info.volume_changes_tail = NULL;
413 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
414 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
415 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
416 s->thread_info.port_latency_offset = s->port_latency_offset;
418 /* FIXME: This should probably be moved to pa_sink_put() */
419 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
422 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
424 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
425 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
428 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
429 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Create the monitor source that mirrors this sink's output. */
433 pa_source_new_data_init(&source_data);
434 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
435 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
436 pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
437 source_data.name = pa_sprintf_malloc("%s.monitor", name);
438 source_data.driver = data->driver;
439 source_data.module = data->module;
440 source_data.card = data->card;
442 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
443 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
444 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
446 s->monitor_source = pa_source_new(core, &source_data,
447 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
448 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
450 pa_source_new_data_done(&source_data);
452 if (!s->monitor_source) {
458 s->monitor_source->monitor_of = s;
/* Keep the monitor's latency configuration in sync with ours. */
460 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
461 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
462 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
467 /* Called from main context */
/* Transition the sink to a new state and/or suspend cause. Calls the
 * implementor's set_state() in the main thread and sends SET_STATE to the
 * IO thread; either may fail only while resuming. Fires state-changed
 * hooks/subscriptions (unless unlinking) and notifies sink inputs and the
 * monitor source about suspend/resume. NOTE(review): interior lines
 * (locals, early return, error rollback branch, closing braces) are missing
 * from this copy. */
468 static int sink_set_state(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) {
471 bool suspend_cause_changed;
476 pa_assert_ctl_context();
478 state_changed = state != s->state;
479 suspend_cause_changed = suspend_cause != s->suspend_cause;
481 if (!state_changed && !suspend_cause_changed)
484 suspending = PA_SINK_IS_OPENED(s->state) && state == PA_SINK_SUSPENDED;
485 resuming = s->state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state);
487 /* If we are resuming, suspend_cause must be 0. */
488 pa_assert(!resuming || !suspend_cause);
490 /* Here's something to think about: what to do with the suspend cause if
491 * resuming the sink fails? The old suspend cause will be incorrect, so we
492 * can't use that. On the other hand, if we set no suspend cause (as is the
493 * case currently), then it looks strange to have a sink suspended without
494 * any cause. It might be a good idea to add a new "resume failed" suspend
495 * cause, or it might just add unnecessary complexity, given that the
496 * current approach of not setting any suspend cause works well enough. */
498 if (s->set_state && state_changed) {
499 ret = s->set_state(s, state);
500 /* set_state() is allowed to fail only when resuming. */
501 pa_assert(ret >= 0 || resuming);
504 if (ret >= 0 && s->asyncmsgq && state_changed)
505 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
506 /* SET_STATE is allowed to fail only when resuming. */
/* Roll the implementor back to SUSPENDED if the IO thread refused. */
510 s->set_state(s, PA_SINK_SUSPENDED);
513 #ifdef TIZEN_PCM_DUMP
514 /* close file for dump pcm */
515 if (s->pcm_dump_fp && (s->core->pcm_dump_option & PA_PCM_DUMP_OPTION_SEPARATED) && suspending) {
516 fclose(s->pcm_dump_fp);
517 pa_log_info("%s closed", s->dump_path);
518 pa_xfree(s->dump_path);
519 s->pcm_dump_fp = NULL;
522 if (suspend_cause_changed) {
523 char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
524 char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
526 pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(s->suspend_cause, old_cause_buf),
527 pa_suspend_cause_to_string(suspend_cause, new_cause_buf));
528 s->suspend_cause = suspend_cause;
535 pa_log_debug("%s: state: %s -> %s", s->name, pa_sink_state_to_string(s->state), pa_sink_state_to_string(state));
538 /* If we enter UNLINKED state, then we don't send change notifications.
539 * pa_sink_unlink() will send unlink notifications instead. */
540 if (state != PA_SINK_UNLINKED) {
541 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
542 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
546 if (suspending || resuming) {
550 /* We're suspending or resuming, tell everyone about it */
552 PA_IDXSET_FOREACH(i, s->inputs, idx)
553 if (s->state == PA_SINK_SUSPENDED &&
554 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
555 pa_sink_input_kill(i);
556 else if (i->suspend)
557 i->suspend(i, state == PA_SINK_SUSPENDED);
561 if ((suspending || resuming || suspend_cause_changed) && s->monitor_source && state != PA_SINK_UNLINKED)
562 pa_source_sync_suspend(s->monitor_source);
/* Install the implementor's get_volume callback.
 * NOTE(review): the function body is missing from this copy. */
567 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Install (or clear) the implementor's set_volume callback and toggle
 * PA_SINK_HW_VOLUME_CTRL accordingly; posts a change event if flags change
 * after init. NOTE(review): the `s->set_volume = cb;`, flag snapshot and
 * if/else lines are missing from this copy. */
573 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
574 pa_sink_flags_t flags;
/* write_volume requires set_volume: cannot clear set_volume while a
 * write_volume callback is installed. */
577 pa_assert(!s->write_volume || cb);
581 /* Save the current flags so we can tell if they've changed */
585 /* The sink implementor is responsible for setting decibel volume support */
586 s->flags |= PA_SINK_HW_VOLUME_CTRL;
588 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
589 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
590 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
593 /* If the flags have changed after init, let any clients know via a change event */
594 if (s->state != PA_SINK_INIT && flags != s->flags)
595 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install (or clear) the deferred-volume write callback and toggle
 * PA_SINK_DEFERRED_VOLUME; posts a change event if flags change after init.
 * NOTE(review): the flag snapshot and if/else condition lines are missing
 * from this copy. */
598 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
599 pa_sink_flags_t flags;
/* A write_volume callback only makes sense with a set_volume callback. */
602 pa_assert(!cb || s->set_volume);
604 s->write_volume = cb;
606 /* Save the current flags so we can tell if they've changed */
610 s->flags |= PA_SINK_DEFERRED_VOLUME;
612 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
614 /* If the flags have changed after init, let any clients know via a change event */
615 if (s->state != PA_SINK_INIT && flags != s->flags)
616 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install the implementor's get_mute callback.
 * NOTE(review): the function body is missing from this copy. */
619 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
/* Install (or clear) the implementor's set_mute callback and toggle
 * PA_SINK_HW_MUTE_CTRL; posts a change event if flags change after init.
 * NOTE(review): the assignment, flag snapshot and if/else condition lines
 * are missing from this copy. */
625 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
626 pa_sink_flags_t flags;
632 /* Save the current flags so we can tell if they've changed */
636 s->flags |= PA_SINK_HW_MUTE_CTRL;
638 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
640 /* If the flags have changed after init, let any clients know via a change event */
641 if (s->state != PA_SINK_INIT && flags != s->flags)
642 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable/disable PA_SINK_FLAT_VOLUME on the sink, gated by the global
 * flat_volumes preference; posts a change event if flags change after init.
 * NOTE(review): the flag snapshot and if/else condition lines are missing
 * from this copy. */
645 static void enable_flat_volume(pa_sink *s, bool enable) {
646 pa_sink_flags_t flags;
650 /* Always follow the overall user preference here */
651 enable = enable && s->core->flat_volumes;
653 /* Save the current flags so we can tell if they've changed */
657 s->flags |= PA_SINK_FLAT_VOLUME;
659 s->flags &= ~PA_SINK_FLAT_VOLUME;
661 /* If the flags have changed after init, let any clients know via a change event */
662 if (s->state != PA_SINK_INIT && flags != s->flags)
663 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable/disable PA_SINK_DECIBEL_VOLUME, propagating the decision to flat
 * volume support; posts a change event if flags change after init.
 * NOTE(review): the flag snapshot and if/else lines are missing from this
 * copy. */
666 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
667 pa_sink_flags_t flags;
671 /* Save the current flags so we can tell if they've changed */
675 s->flags |= PA_SINK_DECIBEL_VOLUME;
676 enable_flat_volume(s, true);
678 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
679 enable_flat_volume(s, false);
682 /* If the flags have changed after init, let any clients know via a change event */
683 if (s->state != PA_SINK_INIT && flags != s->flags)
684 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
687 /* Called from main context */
/* Finish sink initialization after the implementor has configured it:
 * verifies flag/callback consistency, resolves volume-sharing with the
 * master sink, moves to SUSPENDED or IDLE, puts the monitor source and
 * announces the sink via subscription + PUT hook.
 * NOTE(review): interior lines (closing braces, else keywords) are missing
 * from this copy. */
688 void pa_sink_put(pa_sink* s) {
689 pa_sink_assert_ref(s);
690 pa_assert_ctl_context();
692 pa_assert(s->state == PA_SINK_INIT);
693 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
695 /* The following fields must be initialized properly when calling _put() */
696 pa_assert(s->asyncmsgq);
697 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
699 /* Generally, flags should be initialized via pa_sink_new(). As a
700 * special exception we allow some volume related flags to be set
701 * between _new() and _put() by the callback setter functions above.
703 * Thus we implement a couple safeguards here which ensure the above
704 * setters were used (or at least the implementor made manual changes
705 * in a compatible way).
707 * Note: All of these flags set here can change over the life time
709 pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
710 pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
711 pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
713 /* XXX: Currently decibel volume is disabled for all sinks that use volume
714 * sharing. When the master sink supports decibel volume, it would be good
715 * to have the flag also in the filter sink, but currently we don't do that
716 * so that the flags of the filter sink never change when it's moved from
717 * a master sink to another. One solution for this problem would be to
718 * remove user-visible volume altogether from filter sinks when volume
719 * sharing is used, but the current approach was easier to implement... */
720 /* We always support decibel volumes in software, otherwise we leave it to
721 * the sink implementor to set this flag as needed.
723 * Note: This flag can also change over the life time of the sink. */
724 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
725 pa_sink_enable_decibel_volume(s, true);
726 s->soft_volume = s->reference_volume;
729 /* If the sink implementor support DB volumes by itself, we should always
730 * try and enable flat volumes too */
731 if ((s->flags & PA_SINK_DECIBEL_VOLUME))
732 enable_flat_volume(s, true);
734 if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
735 pa_sink *root_sink = pa_sink_get_master(s);
737 pa_assert(root_sink);
/* Filter sinks mirror the master's volumes, remapped to our channels. */
739 s->reference_volume = root_sink->reference_volume;
740 pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
742 s->real_volume = root_sink->real_volume;
743 pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
745 /* We assume that if the sink implementor changed the default
746 * volume he did so in real_volume, because that is the usual
747 * place where he is supposed to place his changes. */
748 s->reference_volume = s->real_volume;
750 s->thread_info.soft_volume = s->soft_volume;
751 s->thread_info.soft_muted = s->muted;
752 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
/* Final sanity checks on flag/latency consistency before going live. */
754 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
755 || (s->base_volume == PA_VOLUME_NORM
756 && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
757 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
758 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
759 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
760 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
762 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
763 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
764 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
766 if (s->suspend_cause)
767 pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED, s->suspend_cause) == 0);
769 pa_assert_se(sink_set_state(s, PA_SINK_IDLE, 0) == 0);
771 pa_source_put(s->monitor_source);
773 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
774 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
776 /* This function must be called after the PA_CORE_HOOK_SINK_PUT hook,
777 * because module-switch-on-connect needs to know the old default sink */
778 pa_core_update_default_sink(s->core);
781 /* Called from main context */
/* Disconnect the sink from the core: fires the UNLINK hook, unregisters the
 * name, removes it from core/card sets, kills or moves its inputs, moves to
 * UNLINKED, unlinks the monitor source and announces removal.
 * NOTE(review): interior lines (locals, early return, move-start logic,
 * closing braces) are missing from this copy. */
782 void pa_sink_unlink(pa_sink* s) {
784 pa_sink_input *i, PA_UNUSED *j = NULL;
786 pa_sink_assert_ref(s);
787 pa_assert_ctl_context();
789 /* Please note that pa_sink_unlink() does more than simply
790 * reversing pa_sink_put(). It also undoes the registrations
791 * already done in pa_sink_new()! */
793 if (s->unlink_requested)
796 s->unlink_requested = true;
798 linked = PA_SINK_IS_LINKED(s->state);
801 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
803 if (s->state != PA_SINK_UNLINKED)
804 pa_namereg_unregister(s->core, s->name);
805 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
807 pa_core_update_default_sink(s->core);
810 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Detach every remaining input; killing removes it from s->inputs. */
812 while ((i = pa_idxset_first(s->inputs, NULL))) {
814 pa_sink_input_kill(i);
819 sink_set_state(s, PA_SINK_UNLINKED, 0);
821 s->state = PA_SINK_UNLINKED;
825 if (s->monitor_source)
826 pa_source_unlink(s->monitor_source);
829 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
830 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
834 /* Called from main context */
/* Final destructor, invoked when the refcount hits zero: releases the
 * monitor source, input containers, silence memblock, proplist, ports and
 * (Tizen) any open PCM dump file. NOTE(review): frees of name/driver and
 * closing braces fall in lines missing from this copy. */
835 static void sink_free(pa_object *o) {
836 pa_sink *s = PA_SINK(o);
839 pa_assert_ctl_context();
840 pa_assert(pa_sink_refcnt(s) == 0);
841 pa_assert(!PA_SINK_IS_LINKED(s->state));
843 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
845 pa_sink_volume_change_flush(s);
847 if (s->monitor_source) {
848 pa_source_unref(s->monitor_source);
849 s->monitor_source = NULL;
852 pa_idxset_free(s->inputs, NULL);
853 pa_hashmap_free(s->thread_info.inputs);
855 if (s->silence.memblock)
856 pa_memblock_unref(s->silence.memblock);
862 pa_proplist_free(s->proplist);
865 pa_hashmap_free(s->ports);
867 #ifdef TIZEN_PCM_DUMP
868 /* close file for dump pcm */
869 if (s->pcm_dump_fp) {
870 fclose(s->pcm_dump_fp);
871 pa_log_info("%s closed", s->dump_path);
872 pa_xfree(s->dump_path);
873 s->pcm_dump_fp = NULL;
879 /* Called from main context, and not while the IO thread is active, please */
/* Attach the async message queue to the sink and mirror it onto the monitor
 * source. NOTE(review): the `s->asyncmsgq = q;` line is missing from this
 * copy. */
880 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
881 pa_sink_assert_ref(s);
882 pa_assert_ctl_context();
886 if (s->monitor_source)
887 pa_source_set_asyncmsgq(s->monitor_source, q);
890 /* Called from main context, and not while the IO thread is active, please */
/* Update the LATENCY/DYNAMIC_LATENCY flags (only those are permitted),
 * notify clients and hooks, mirror the change onto the monitor source, and
 * recurse into filter sinks attached via their inputs' origin_sink. */
891 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
892 pa_sink_flags_t old_flags;
893 pa_sink_input *input;
896 pa_sink_assert_ref(s);
897 pa_assert_ctl_context();
899 /* For now, allow only a minimal set of flags to be changed. */
900 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
902 old_flags = s->flags;
903 s->flags = (s->flags & ~mask) | (value & mask);
905 if (s->flags == old_flags)
908 if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
909 pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
911 if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
912 pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
913 s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
915 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
916 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
918 if (s->monitor_source)
919 pa_source_update_flags(s->monitor_source,
920 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
921 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
922 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
923 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
/* Propagate to filter sinks stacked on top of this one. */
925 PA_IDXSET_FOREACH(input, s->inputs, idx) {
926 if (input->origin_sink)
927 pa_sink_update_flags(input->origin_sink, mask, value);
931 /* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object used by the IO thread and mirror it onto the
 * monitor source. */
932 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
933 pa_sink_assert_ref(s);
934 pa_sink_assert_io_context(s);
936 s->thread_info.rtpoll = p;
938 if (s->monitor_source)
939 pa_source_set_rtpoll(s->monitor_source, p);
942 /* Called from main context */
/* Re-evaluate RUNNING vs IDLE based on current users; no-op while the sink
 * is SUSPENDED. Returns the sink_set_state() result. */
943 int pa_sink_update_status(pa_sink*s) {
944 pa_sink_assert_ref(s);
945 pa_assert_ctl_context();
946 pa_assert(PA_SINK_IS_LINKED(s->state));
948 if (s->state == PA_SINK_SUSPENDED)
951 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
954 /* Called from any context - must be threadsafe */
/* Atomically mark/clear the "mixer settings need re-sync" flag. */
955 void pa_sink_set_mixer_dirty(pa_sink *s, bool is_dirty) {
956 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
959 /* Called from main context */
/* Add or remove a suspend cause. On full resume with a dirty mixer, re-apply
 * the active port (via the IO thread when deferred volume is in use).
 * Returns the sink_set_state() result. NOTE(review): interior lines
 * (else branches, closing braces) are missing from this copy. */
960 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
961 pa_suspend_cause_t merged_cause;
963 pa_sink_assert_ref(s);
964 pa_assert_ctl_context();
965 pa_assert(PA_SINK_IS_LINKED(s->state));
966 pa_assert(cause != 0);
969 merged_cause = s->suspend_cause | cause;
971 merged_cause = s->suspend_cause & ~cause;
973 if (!(merged_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
974 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
975 it'll be handled just fine. */
976 pa_sink_set_mixer_dirty(s, false);
977 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
978 if (s->active_port && s->set_port) {
979 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
980 struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
981 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
984 s->set_port(s, s->active_port);
995 return sink_set_state(s, PA_SINK_SUSPENDED, merged_cause);
997 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
1000 /* Called from main context */
/* Begin moving every input away from this sink: each input that accepts
 * start_move is pushed (with a ref held) onto queue q, which is returned.
 * NOTE(review): the queue allocation when q is NULL and the closing braces
 * fall in lines missing from this copy. */
1001 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
1002 pa_sink_input *i, *n;
1005 pa_sink_assert_ref(s);
1006 pa_assert_ctl_context();
1007 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the next input before start_move can mutate s->inputs. */
1012 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
1013 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
1015 pa_sink_input_ref(i);
1017 if (pa_sink_input_start_move(i) >= 0)
1018 pa_queue_push(q, i);
1020 pa_sink_input_unref(i);
1026 /* Called from main context */
/* Complete a move started by pa_sink_move_all_start(): re-attach each queued
 * input to sink s (falling back to fail_move on error), drop the extra refs
 * and free the queue. */
1027 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
1030 pa_sink_assert_ref(s);
1031 pa_assert_ctl_context();
1032 pa_assert(PA_SINK_IS_LINKED(s->state));
1035 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1036 if (PA_SINK_INPUT_IS_LINKED(i->state)) {
1037 if (pa_sink_input_finish_move(i, s, save) < 0)
1038 pa_sink_input_fail_move(i);
1041 pa_sink_input_unref(i);
1044 pa_queue_free(q, NULL);
1047 /* Called from main context */
/* Abort a pending move for every queued input: fail each one, drop the ref
 * taken in move_all_start and free the queue. */
1048 void pa_sink_move_all_fail(pa_queue *q) {
1051 pa_assert_ctl_context();
1054 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1055 pa_sink_input_fail_move(i);
1056 pa_sink_input_unref(i);
1059 pa_queue_free(q, NULL);
1062 /* Called from IO thread context */
/* Scan the sink's inputs for underruns (recursing through filter-sink
 * trees, converting sizes between sample-spec domains) and return how much
 * of left_to_play remains before the longest underrun catches up.
 * NOTE(review): local declarations, the uf==0 branch and the result
 * assignment in the final else-if fall in lines missing from this copy. */
1063 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
1068 pa_sink_assert_ref(s);
1069 pa_sink_assert_io_context(s);
1071 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1072 size_t uf = i->thread_info.underrun_for_sink;
1074 /* Propagate down the filter tree */
1075 if (i->origin_sink) {
1076 size_t filter_result, left_to_play_origin;
1078 /* The recursive call works in the origin sink domain ... */
1079 left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
1081 /* .. and returns the time to sleep before waking up. We need the
1082 * underrun duration for comparisons, so we undo the subtraction on
1083 * the return value... */
1084 filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
1086 /* ... and convert it back to the master sink domain */
1087 filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
1089 /* Remember the longest underrun so far */
1090 if (filter_result > result)
1091 result = filter_result;
1095 /* No underrun here, move on */
1097 } else if (uf >= left_to_play) {
1098 /* The sink has possibly consumed all the data the sink input provided */
1099 pa_sink_input_process_underrun(i);
1100 } else if (uf > result) {
1101 /* Remember the longest underrun so far */
1107 pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
1108 (long) result, (long) left_to_play - result);
1109 return left_to_play - result;
/* Called from IO thread context */
/* Executes a previously requested rewind of 'nbytes' bytes: clears the
 * request state, rewinds any deferred volume changes, then forwards the
 * rewind to every connected input and to the monitor source. */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    /* If nobody requested this and this is actually no real rewind
     * then we can short cut this. Please note that this means that
     * not all rewind requests triggered upstream will always be
     * translated in actual requests! */
    /* NOTE(review): nbytes is size_t, so "<= 0" is effectively "== 0" */
    if (!s->thread_info.rewind_requested && nbytes <= 0)
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = false;
    pa_log_debug("Processing rewind...");
    if (s->flags & PA_SINK_DEFERRED_VOLUME)
        pa_sink_volume_change_rewind(s, nbytes);
#ifdef TIZEN_PCM_DUMP
    /* Keep the PCM dump file position in sync with the rewound stream */
    fseeko(s->pcm_dump_fp, (off_t)nbytes * (-1), SEEK_CUR);
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_process_rewind(i, nbytes);
    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_process_rewind(s->monitor_source, nbytes);
/* Called from IO thread context */
/* Peeks data from up to 'maxinfo' inputs into the 'info' array, skipping
 * chunks that are pure silence, and shrinks *length to the shortest chunk
 * gathered. Each filled entry keeps a reference to its sink input (in
 * info->userdata) so inputs_drop() can match and release it later. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    size_t mixlength = *length;
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;
        /* Silent chunks need not be mixed at all */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
        info->userdata = pa_sink_input_ref(i);
        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);
    *length = mixlength;
/* Called from IO thread context */
/* Consumes result->length bytes from every input after mixing: hands the
 * per-input data (with the input's volume applied) to any direct outputs
 * of the monitor source, drops the references taken by fill_mix_info(),
 * and finally posts the mixed result to the monitor source itself. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    unsigned n_unreffed = 0;
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);
    /* We optimize for the case where the order of the inputs has not changed */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_mix_info* m = NULL;
        pa_sink_input_assert_ref(i);
        /* Let's try to find the matching entry info the pa_mix_info array */
        for (j = 0; j < n; j ++) {
            if (info[p].userdata == i) {
        /* Drop read data */
        pa_sink_input_drop(i, result->length);
        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                /* If we have a mix entry for this input, use its chunk and
                 * apply the per-input volume before posting */
                if (m && m->chunk.memblock) {
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                pa_memblock_unref(c.memblock);
        /* Release the chunk and the input reference held by the entry */
        if (m->chunk.memblock) {
            pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);
        pa_sink_input_unref(m->userdata);
    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */
    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
/* Called from IO thread context */
/* Renders at most 'length' bytes of mixed audio into '*result'. A
 * suspended sink yields (a reference to) cached silence; a single input
 * is passed through with soft volume applied; multiple inputs are mixed
 * into a freshly allocated memblock. The consumed data is dropped from
 * the inputs afterwards via inputs_drop(). */
void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    size_t block_size_max;
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);
    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        result->memblock = pa_memblock_ref(s->silence.memblock);
        result->index = s->silence.index;
        result->length = PA_MIN(s->silence.length, length);
    /* Use one page worth of audio as the render size, frame-aligned */
    length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
    /* Never exceed the mempool's maximum block size */
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);
    pa_assert(length > 0);
    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
    /* No (audible) inputs: hand out a reference to the silence chunk */
    *result = s->silence;
    pa_memblock_ref(result->memblock);
    if (result->length > length)
        result->length = length;
    } else if (n == 1) {
        /* Exactly one input: reuse its chunk instead of mixing */
        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);
        if (result->length > length)
            result->length = length;
        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
            pa_memblock_unref(result->memblock);
            pa_silence_memchunk_get(&s->core->silence_cache,
        } else if (!pa_cvolume_is_norm(&volume)) {
            /* Volume differs from 0 dB: copy-on-write, then scale in place */
            pa_memchunk_make_writable(result, 0);
            pa_volume_memchunk(result, &s->sample_spec, &volume);
        result->memblock = pa_memblock_new(s->core->mempool, length);
        ptr = pa_memblock_acquire(result->memblock);
        result->length = pa_mix(info, n,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);
        pa_memblock_release(result->memblock);
    inputs_drop(s, info, n, result);
#ifdef TIZEN_PCM_DUMP
    pa_sink_write_pcm_dump(s, result);
/* Called from IO thread context */
/* Like pa_sink_render(), but mixes into the caller-provided 'target'
 * memchunk. May write less than target->length bytes (target->length is
 * shrunk accordingly); a suspended sink produces silence. */
void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    size_t length, block_size_max;
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);
    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
    length = target->length;
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);
    pa_assert(length > 0);
    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
    /* No (audible) inputs: fill the target with silence */
    if (target->length > length)
        target->length = length;
    pa_silence_memchunk(target, &s->sample_spec);
    } else if (n == 1) {
        /* Exactly one input: copy its (volume-scaled) data into target */
        if (target->length > length)
            target->length = length;
        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
            pa_silence_memchunk(target, &s->sample_spec);
        vchunk = info[0].chunk;
        pa_memblock_ref(vchunk.memblock);
        if (vchunk.length > length)
            vchunk.length = length;
        if (!pa_cvolume_is_norm(&volume)) {
            pa_memchunk_make_writable(&vchunk, 0);
            pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
        pa_memchunk_memcpy(target, &vchunk);
        pa_memblock_unref(vchunk.memblock);
    /* Multiple inputs: mix straight into the target memblock */
    ptr = pa_memblock_acquire(target->memblock);
    target->length = pa_mix(info, n,
                            (uint8_t*) ptr + target->index, length,
                            &s->thread_info.soft_volume,
                            s->thread_info.soft_muted);
    pa_memblock_release(target->memblock);
    inputs_drop(s, info, n, target);
#ifdef TIZEN_PCM_DUMP
    pa_sink_write_pcm_dump(s, target);
/* Called from IO thread context */
/* Fills 'target' completely, calling pa_sink_render_into() repeatedly
 * until every byte of the chunk has been written. A suspended sink gets
 * pure silence in one step. */
void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);
    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
    /* Render the next slice of the target chunk */
    pa_sink_render_into(s, &chunk);
/* Called from IO thread context */
/* Renders exactly 'length' bytes into '*result'. First tries a plain
 * pa_sink_render(); if that comes up short, the chunk is grown (with a
 * copy if needed) and the remainder is rendered directly behind the data
 * already produced. */
void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(length > 0);
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);
    pa_sink_render(s, length, result);
    if (result->length < length) {
        /* Ensure the block is writable and large enough for 'length',
         * then describe the missing tail as its own chunk */
        pa_memchunk_make_writable(result, length);
        chunk.memblock = result->memblock;
        chunk.index = result->index + result->length;
        chunk.length = length - result->length;
        pa_sink_render_into_full(s, &chunk);
        result->length = length;
/* Called from main thread */
/* Tries to reconfigure the sink to the sample spec 'spec' (currently only
 * the rate is considered) and the given passthrough mode. Bails out when
 * the sink or its monitor is running, the spec is invalid, or the change
 * would be pointless; otherwise suspends the sink, calls the
 * implementation's reconfigure() hook, updates the monitor source and
 * corked inputs, and resumes. */
int pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
    pa_sample_spec desired_spec;
    uint32_t default_rate = s->default_sample_rate;
    uint32_t alternate_rate = s->alternate_sample_rate;
    bool default_rate_is_usable = false;
    bool alternate_rate_is_usable = false;
    /* NOTE(review): two initializations of avoid_resampling appear here;
     * the preprocessor conditionals selecting between the per-sink and the
     * core-wide flag are not visible in this view — confirm upstream. */
    bool avoid_resampling = s->avoid_resampling;
    bool avoid_resampling = s->core->avoid_resampling;
    /* We currently only try to reconfigure the sample rate */
    if (pa_sample_spec_equal(spec, &s->sample_spec))
    if (!s->reconfigure)
    if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
        pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
    if (PA_SINK_IS_RUNNING(s->state)) {
        pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
                    s->sample_spec.rate);
    if (s->monitor_source) {
        if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
            pa_log_info("Cannot update rate, monitor source is RUNNING");
    if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
    desired_spec = s->sample_spec;
    /* We have to try to use the sink input rate */
    desired_spec.rate = spec->rate;
    } else if (avoid_resampling && (spec->rate >= default_rate || spec->rate >= alternate_rate)) {
        /* We just try to set the sink input's sample rate if it's not too low */
        desired_spec.rate = spec->rate;
    } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
        /* We can directly try to use this rate */
        desired_spec.rate = spec->rate;
    /* See if we can pick a rate that results in less resampling effort */
    if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
        default_rate_is_usable = true;
    if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
        default_rate_is_usable = true;
    if (alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
        alternate_rate_is_usable = true;
    if (alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
        alternate_rate_is_usable = true;
    if (alternate_rate_is_usable && !default_rate_is_usable)
        desired_spec.rate = alternate_rate;
    desired_spec.rate = default_rate;
    /* Nothing would change: don't bother suspending the sink */
    if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_sink_is_passthrough(s))
    if (!passthrough && pa_sink_used_by(s) > 0)
    pa_log_debug("Suspending sink %s due to changing format, desired rate = %u", s->name, desired_spec.rate);
    pa_log_debug("Suspending sink %s due to changing format", s->name);
    pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
    if (s->reconfigure(s, &desired_spec, passthrough) >= 0) {
        /* update monitor source as well */
        if (s->monitor_source && !passthrough)
            pa_source_reconfigure(s->monitor_source, &desired_spec, false);
        pa_log_info("Changed format successfully");
        /* Corked inputs must re-create their resamplers for the new rate */
        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            if (i->state == PA_SINK_INPUT_CORKED)
                pa_sink_input_update_rate(i);
    pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
/* Called from main thread */
/* Returns the sink's current playback latency in usec, queried from the
 * IO thread via the async message queue, with the port latency offset
 * applied (guarded so the unsigned result cannot underflow). */
pa_usec_t pa_sink_get_latency(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    /* The returned value is supposed to be in the time domain of the sound card! */
    if (s->state == PA_SINK_SUSPENDED)
    if (!(s->flags & PA_SINK_LATENCY))
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
    /* the return value is unsigned, so check that the offset can be added to usec without
     * underflowing the result */
    if (-s->port_latency_offset <= usec)
        usec += s->port_latency_offset;
    return (pa_usec_t)usec;
/* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): calls process_msg()
 * directly instead of going through the message queue, applies the
 * port latency offset, and clamps negative results unless
 * 'allow_negative' is set. */
int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    /* The returned value is supposed to be in the time domain of the sound card! */
    if (s->thread_info.state == PA_SINK_SUSPENDED)
    if (!(s->flags & PA_SINK_LATENCY))
    o = PA_MSGOBJECT(s);
    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
    o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
    /* If allow_negative is false, the call should only return positive values, */
    usec += s->thread_info.port_latency_offset;
    if (!allow_negative && usec < 0)
/* Called from the main thread (and also from the IO thread while the main
 * thread is waiting).
 *
 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
 * set. Instead, flat volume mode is detected by checking whether the root sink
 * has the flag set. */
bool pa_sink_flat_volume_enabled(pa_sink *s) {
    pa_sink_assert_ref(s);
    /* Resolve volume sharing: the root sink owns the flag */
    s = pa_sink_get_master(s);
    return (s->flags & PA_SINK_FLAT_VOLUME);
/* Called from the main thread (and also from the IO thread while the main
 * thread is waiting). */
/* Walks up the filter chain and returns the root sink that actually owns
 * the volume. May yield NULL when a volume-sharing filter is currently
 * detached from its master (input_to_master unset). */
pa_sink *pa_sink_get_master(pa_sink *s) {
    pa_sink_assert_ref(s);
    while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
        if (PA_UNLIKELY(!s->input_to_master))
        s = s->input_to_master->sink;
/* Called from main context */
/* A sink is a filter sink iff it is connected to a master sink through
 * an input_to_master sink input. */
bool pa_sink_is_filter(pa_sink *s) {
    pa_sink_assert_ref(s);
    return (s->input_to_master != NULL);
/* Called from main context */
/* Returns whether the sink currently carries a passthrough (non-PCM)
 * stream: at most one input may be connected, and it must itself be a
 * passthrough input. */
bool pa_sink_is_passthrough(pa_sink *s) {
    pa_sink_input *alt_i;
    pa_sink_assert_ref(s);
    /* one and only one PASSTHROUGH input can possibly be connected */
    if (pa_idxset_size(s->inputs) == 1) {
        alt_i = pa_idxset_first(s->inputs, &idx);
        if (pa_sink_input_is_passthrough(alt_i))
/* Called from main context */
/* Switches the PA core objects into passthrough mode: suspends the
 * monitor source, saves the current volume and forces the sink volume
 * to (at most) 0 dB so the compressed stream is not scaled. */
void pa_sink_enter_passthrough(pa_sink *s) {
    /* The sink implementation is reconfigured for passthrough in
     * pa_sink_reconfigure(). This function sets the PA core objects to
     * passthrough mode. */
    /* disable the monitor in passthrough mode */
    if (s->monitor_source) {
        pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
        pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
    /* set the volume to NORM */
    s->saved_volume = *pa_sink_get_volume(s, true);
    s->saved_save_volume = s->save_volume;
    pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
    pa_sink_set_volume(s, &volume, true, false);
    pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
/* Called from main context */
/* Undoes pa_sink_enter_passthrough(): resumes the monitor source and
 * restores the volume that was saved when passthrough mode was entered. */
void pa_sink_leave_passthrough(pa_sink *s) {
    /* Unsuspend monitor */
    if (s->monitor_source) {
        pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
        pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
    /* Restore sink volume to what it was before we entered passthrough mode */
    pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
    /* Invalidate the saved state now that it has been consumed */
    pa_cvolume_init(&s->saved_volume);
    s->saved_save_volume = false;
/* Called from main context. */
/* Recomputes a single input's reference ratio, per channel, skipping
 * channels where the sink volume is muted or where the ratio is already
 * exact (to avoid accumulating rounding error). */
static void compute_reference_ratio(pa_sink_input *i) {
    pa_cvolume remapped;
    pa_assert(pa_sink_flat_volume_enabled(i->sink));
    /*
     * Calculates the reference ratio from the sink's reference
     * volume. This basically calculates:
     *
     * i->reference_ratio = i->volume / i->sink->reference_volume
     */
    /* Bring the sink volume into the input's channel map first */
    remapped = i->sink->reference_volume;
    pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
    ratio = i->reference_ratio;
    for (c = 0; c < i->sample_spec.channels; c++) {
        /* We don't update when the sink volume is 0 anyway */
        if (remapped.values[c] <= PA_VOLUME_MUTED)
        /* Don't update the reference ratio unless necessary */
        if (pa_sw_volume_multiply(
                remapped.values[c]) == i->volume.values[c])
        ratio.values[c] = pa_sw_volume_divide(
            i->volume.values[c],
            remapped.values[c]);
    pa_sink_input_set_reference_ratio(i, &ratio);
/* Called from main context. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
/* Updates the reference ratio of every input of 's', recursing into any
 * linked filter sink that shares its volume with the master. */
static void compute_reference_ratios(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        compute_reference_ratio(i);
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
                && PA_SINK_IS_LINKED(i->origin_sink->state))
            compute_reference_ratios(i->origin_sink);
/* Called from main context. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
/* Recomputes every input's real_ratio and soft_volume from the sink's
 * real volume. Volume-sharing filter inputs get a 0 dB ratio and the
 * recursion continues into their origin sink. */
static void compute_real_ratios(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume remapped;
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin sink uses volume sharing, so this input's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result i->soft_volume must equal i->volume_factor. */
            pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
            i->soft_volume = i->volume_factor;
            if (PA_SINK_IS_LINKED(i->origin_sink->state))
                compute_real_ratios(i->origin_sink);
        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */
        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
        i->real_ratio.channels = i->sample_spec.channels;
        i->soft_volume.channels = i->sample_spec.channels;
        for (c = 0; c < i->sample_spec.channels; c++) {
            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave i->real_ratio untouched */
                i->soft_volume.values[c] = PA_VOLUME_MUTED;
            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    remapped.values[c]) != i->volume.values[c])
                i->real_ratio.values[c] = pa_sw_volume_divide(
                    i->volume.values[c],
                    remapped.values[c]);
            i->soft_volume.values[c] = pa_sw_volume_multiply(
                i->real_ratio.values[c],
                i->volume_factor.values[c]);
    /* We don't copy the soft_volume to the thread_info data
     * here. That must be done by the caller */
/* Remaps the volume 'v' from channel map 'from' to 'to' while minimizing
 * the impact of the remapping (see the explanatory comment below).
 * Returns the remapped volume. */
static pa_cvolume *cvolume_remap_minimal_impact(
        const pa_cvolume *template,
        const pa_channel_map *from,
        const pa_channel_map *to) {
    pa_assert(template);
    pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
    pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
    /* Much like pa_cvolume_remap(), but tries to minimize impact when
     * mapping from sink input to sink volumes:
     *
     * If template is a possible remapping from v it is used instead
     * of remapping anew.
     *
     * If the channel maps don't match we set an all-channel volume on
     * the sink to ensure that changing a volume on one stream has no
     * effect that cannot be compensated for in another stream that
     * does not have the same channel map as the sink. */
    if (pa_channel_map_equal(from, to))
    /* If remapping the template back yields v, the template is a valid
     * remap of v — reuse it verbatim */
    if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
    /* Incompatible maps: flatten to the maximum across all channels */
    pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
/* Merges the volumes of all (non-volume-sharing) inputs of 's' into
 * '*max_volume' (expressed in 'channel_map'), recursing through filter
 * sinks that share their volume with the master. */
static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
    pa_sink_assert_ref(s);
    pa_assert(max_volume);
    pa_assert(channel_map);
    pa_assert(pa_sink_flat_volume_enabled(s));
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume remapped;
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            if (PA_SINK_IS_LINKED(i->origin_sink->state))
                get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
            /* Ignore this input. The origin sink uses volume sharing, so this
             * input's volume will be set to be equal to the root sink's real
             * volume. Obviously this input's current volume must not then
             * affect what the root sink's real volume will be. */
        remapped = i->volume;
        cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
        pa_cvolume_merge(max_volume, max_volume, &remapped);
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
/* Returns true when the sink — or any volume-sharing filter sink hanging
 * off it — has at least one real (non-filter) input connected. */
static bool has_inputs(pa_sink *s) {
    pa_sink_assert_ref(s);
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
/* Sets s->real_volume to 'new_volume' (remapped from 'channel_map' into
 * the sink's own map) and propagates it down to all volume-sharing filter
 * sinks, updating the filters' connecting inputs along the way. */
static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_sink_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);
    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_sink_flat_volume_enabled(s)) {
                pa_cvolume new_input_volume;
                /* Follow the root sink's real volume. */
                new_input_volume = *new_volume;
                pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
                pa_sink_input_set_volume_direct(i, &new_input_volume);
                compute_reference_ratio(i);
            if (PA_SINK_IS_LINKED(i->origin_sink->state))
                update_real_volume(i->origin_sink, new_volume, channel_map);
/* Called from main thread. Only called for the root sink in shared volume
 * cases. */
/* Determines the maximum volume over all connected streams and stores it
 * as the sink's real volume; then refreshes the inputs' real ratios and
 * soft volumes against that new value. */
static void compute_real_volume(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */
    if (!has_inputs(s)) {
        /* In the special case that we have no sink inputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);
    /* Start from silence and merge every input's volume into it */
    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
    /* First let's determine the new maximum volume of all inputs
     * connected to this sink */
    get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);
    /* Then, let's update the real ratios/soft volumes of all inputs
     * connected to this sink */
    compute_real_ratios(s);
/* Called from main thread. Only called for the root sink in shared volume
 * cases, except for internal recursive calls. */
/* Pushes a changed sink reference volume back down to the per-stream
 * volumes, keeping each stream's reference ratio constant. */
static void propagate_reference_volume(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));
    /* This is called whenever the sink volume changes that is not
     * caused by a sink input volume change. We need to fix up the
     * sink input volumes accordingly */
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume new_volume;
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            if (PA_SINK_IS_LINKED(i->origin_sink->state))
                propagate_reference_volume(i->origin_sink);
            /* Since the origin sink uses volume sharing, this input's volume
             * needs to be updated to match the root sink's real volume, but
             * that will be done later in update_real_volume(). */
        /* This basically calculates:
         *
         * i->volume := s->reference_volume * i->reference_ratio */
        new_volume = s->reference_volume;
        pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
        pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
        pa_sink_input_set_volume_direct(i, &new_volume);
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed. */
static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
    bool reference_volume_changed;
    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));
    /* Remap the new volume into the sink's own channel map before comparing */
    pa_cvolume_remap(&volume, channel_map, &s->channel_map);
    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    pa_sink_set_reference_volume_direct(s, &volume);
    s->save_volume = (!reference_volume_changed && s->save_volume) || save;
    if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        /* If the root sink's volume doesn't change, then there can't be any
         * changes in the other sinks in the sink tree either.
         *
         * It's probably theoretically possible that even if the root sink's
         * volume changes slightly, some filter sink doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root sink volume to the sinks connected to the
         * intermediate sink that didn't change its volume. This theoretical
         * possibility is the reason why we have that !(s->flags &
         * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we returned here false always if
         * reference_volume_changed is false. */
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
                && PA_SINK_IS_LINKED(i->origin_sink->state))
            update_reference_volume(i->origin_sink, v, channel_map, false);
/* Called from main thread */
/* Sets the sink's volume (or, when 'volume' is NULL, re-synchronizes the
 * sink's reference/real volume with the stream volumes). In flat-volume /
 * volume-sharing setups the change is applied to the root sink and then
 * propagated through the whole sink tree; finally the hardware or soft
 * volume is updated and the IO thread is notified. */
void pa_sink_set_volume(
        const pa_cvolume *volume,
    pa_cvolume new_reference_volume;
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_sink_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
    /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
     * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
    if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
        pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
    /* In case of volume sharing, the volume is set for the root sink first,
     * from which it's then propagated to the sharing sinks. */
    root_sink = pa_sink_get_master(s);
    if (PA_UNLIKELY(!root_sink))
    /* As a special exception we accept mono volumes on all sinks --
     * even on those with more complex channel maps */
    if (pa_cvolume_compatible(volume, &s->sample_spec))
        new_reference_volume = *volume;
    /* Mono volume on a multi-channel sink: scale the existing volume */
    new_reference_volume = s->reference_volume;
    pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
    pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
    if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
        if (pa_sink_flat_volume_enabled(root_sink)) {
            /* OK, propagate this volume change back to the inputs */
            propagate_reference_volume(root_sink);
            /* And now recalculate the real volume */
            compute_real_volume(root_sink);
        update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
    /* If volume is NULL we synchronize the sink's real and
     * reference volumes with the stream volumes. */
    pa_assert(pa_sink_flat_volume_enabled(root_sink));
    /* Ok, let's determine the new real volume */
    compute_real_volume(root_sink);
    /* Let's 'push' the reference volume if necessary */
    pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
    /* If the sink and its root don't have the same number of channels, we need to remap */
    if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
    update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
    /* Now that the reference volume is updated, we can update the streams'
     * reference ratios. */
    compute_reference_ratios(root_sink);
    if (root_sink->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_sink->soft_volume */
        pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
        if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
            root_sink->set_volume(root_sink);
    /* If we have no function set_volume(), then the soft volume
     * becomes the real volume */
    root_sink->soft_volume = root_sink->real_volume;
    /* This tells the sink that soft volume and/or real volume changed */
    pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
/* Called from the io thread if sync volume is used, otherwise from the main thread.
 * Only to be called by sink implementor */
/* Sets the sink's soft (software) volume. A NULL 'volume' resets it to
 * 0 dB. For non-deferred-volume sinks the new value is forwarded to the
 * IO thread via a message; deferred-volume sinks copy it directly. */
void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
    pa_sink_assert_ref(s);
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
    /* Deferred volume means we are already running in IO context */
    if (s->flags & PA_SINK_DEFERRED_VOLUME)
        pa_sink_assert_io_context(s);
    pa_assert_ctl_context();
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->soft_volume = *volume;
    if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
    /* Not yet linked (or deferred): update the thread copy directly */
    s->thread_info.soft_volume = s->soft_volume;
2284 /* Called from the main thread. Only called for the root sink in volume sharing
2285 * cases, except for internal recursive calls. */
2286 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
/* Reacts to an externally-changed hardware volume: adopts the new real
 * volume as the reference volume and rebuilds the per-stream volumes from
 * the (unchanged) reference ratios, recursing into filter sinks that share
 * volume with this one. */
2290 pa_sink_assert_ref(s);
2291 pa_assert(old_real_volume);
2292 pa_assert_ctl_context();
2293 pa_assert(PA_SINK_IS_LINKED(s->state));
2295 /* This is called when the hardware's real volume changes due to
2296 * some external event. We copy the real volume into our
2297 * reference volume and then rebuild the stream volumes based on
2298 * i->real_ratio which should stay fixed. */
2300 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* NOTE(review): an early return after this no-change check is elided in
 * this listing — nothing to propagate when the real volume is unchanged. */
2301 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2304 /* 1. Make the real volume the reference volume */
2305 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2308 if (pa_sink_flat_volume_enabled(s)) {
2310 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2311 pa_cvolume new_volume;
2313 /* 2. Since the sink's reference and real volumes are equal
2314 * now our ratios should be too. */
2315 pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2317 /* 3. Recalculate the new stream reference volume based on the
2318 * reference ratio and the sink's reference volume.
2320 * This basically calculates:
2322 * i->volume = s->reference_volume * i->reference_ratio
2324 * This is identical to propagate_reference_volume() */
2325 new_volume = s->reference_volume;
2326 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2327 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2328 pa_sink_input_set_volume_direct(i, &new_volume);
/* Recurse into filter sinks stacked on this input so the shared volume
 * propagates all the way down. */
2330 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2331 && PA_SINK_IS_LINKED(i->origin_sink->state))
2332 propagate_real_volume(i->origin_sink, old_real_volume);
2336 /* Something got changed in the hardware. It probably makes sense
2337 * to save changed hw settings given that hw volume changes not
2338 * triggered by PA are almost certainly done by the user. */
2339 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2340 s->save_volume = true;
2343 /* Called from io thread */
2344 void pa_sink_update_volume_and_mute(pa_sink *s) {
/* Asks the main thread (which owns the volume/mute state machine) to
 * re-read hardware volume and mute; the post is asynchronous, so this is
 * safe to call from the IO loop without blocking. */
2346 pa_sink_assert_io_context(s);
2348 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2351 /* Called from main thread */
2352 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
/* Returns the sink's reference volume, optionally re-reading the hardware
 * volume first (when the sink requests refreshes or force_refresh is set)
 * and propagating any externally-changed real volume. */
2353 pa_sink_assert_ref(s);
2354 pa_assert_ctl_context();
2355 pa_assert(PA_SINK_IS_LINKED(s->state));
2357 if (s->refresh_volume || force_refresh) {
2358 struct pa_cvolume old_real_volume;
2360 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2362 old_real_volume = s->real_volume;
/* NOTE(review): with deferred volume the query goes through the IO thread
 * message below; the direct s->get_volume(s) call for the non-deferred
 * branch is elided in this listing. */
2364 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2367 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2369 update_real_volume(s, &s->real_volume, &s->channel_map);
2370 propagate_real_volume(s, &old_real_volume);
2373 return &s->reference_volume;
2376 /* Called from main thread. In volume sharing cases, only the root sink may
/* Notification hook for sink implementors: call when the hardware volume
 * changed behind PulseAudio's back so the new real volume is recorded and
 * propagated to reference volume and streams. */
2378 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2379 pa_cvolume old_real_volume;
2381 pa_sink_assert_ref(s);
2382 pa_assert_ctl_context();
2383 pa_assert(PA_SINK_IS_LINKED(s->state));
2384 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2386 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2388 old_real_volume = s->real_volume;
2389 update_real_volume(s, new_real_volume, &s->channel_map);
2390 propagate_real_volume(s, &old_real_volume);
2393 /* Called from main thread */
2394 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
/* Sets the sink's mute state. 'save' marks the state as user-chosen so it
 * gets persisted. A no-op change only merges the save flag. */
2397 pa_sink_assert_ref(s);
2398 pa_assert_ctl_context();
2400 old_muted = s->muted;
2402 if (mute == old_muted) {
2403 s->save_muted |= save;
2408 s->save_muted = save;
/* Without deferred volume, apply the mute directly via the implementor
 * callback; set_mute_in_progress suppresses re-entrant notifications from
 * pa_sink_mute_changed() while the callback runs. */
2410 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
2411 s->set_mute_in_progress = true;
2413 s->set_mute_in_progress = false;
2416 if (!PA_SINK_IS_LINKED(s->state))
2419 pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
2420 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2421 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2422 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2425 /* Called from main thread */
2426 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
/* Returns the sink's mute state, optionally querying the hardware first.
 * With deferred volume the query is routed through the IO thread; otherwise
 * the implementor callback is invoked directly. Either way, a changed value
 * is fed back through pa_sink_mute_changed(). NOTE(review): the final
 * "return s->muted;" is elided in this listing. */
2428 pa_sink_assert_ref(s);
2429 pa_assert_ctl_context();
2430 pa_assert(PA_SINK_IS_LINKED(s->state));
2432 if ((s->refresh_muted || force_refresh) && s->get_mute) {
2435 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2436 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2437 pa_sink_mute_changed(s, mute);
2439 if (s->get_mute(s, &mute) >= 0)
2440 pa_sink_mute_changed(s, mute);
2447 /* Called from main thread */
2448 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
/* Notification hook for externally-changed hardware mute: forwards the new
 * state into pa_sink_set_mute() with save=true (external changes are almost
 * certainly user-initiated). Ignored while our own set_mute() callback is
 * still running, to avoid feedback loops. */
2449 pa_sink_assert_ref(s);
2450 pa_assert_ctl_context();
2451 pa_assert(PA_SINK_IS_LINKED(s->state));
2453 if (s->set_mute_in_progress)
2456 /* pa_sink_set_mute() does this same check, so this may appear redundant,
2457 * but we must have this here also, because the save parameter of
2458 * pa_sink_set_mute() would otherwise have unintended side effects (saving
2459 * the mute state when it shouldn't be saved). */
2460 if (new_muted == s->muted)
2463 pa_sink_set_mute(s, new_muted, true);
2466 /* Called from main thread */
2467 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
/* Merges 'p' into the sink's property list using the given update mode and,
 * if the sink is already linked, fires the proplist-changed hook and posts a
 * change event to subscribers. NOTE(review): the boolean return value's
 * source is elided in this listing — presumably unconditionally true. */
2468 pa_sink_assert_ref(s);
2469 pa_assert_ctl_context();
2472 pa_proplist_update(s->proplist, mode, p);
2474 if (PA_SINK_IS_LINKED(s->state)) {
2475 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2476 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2482 /* Called from main thread */
2483 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
2484 void pa_sink_set_description(pa_sink *s, const char *description) {
/* Updates the human-readable device description (PA_PROP_DEVICE_DESCRIPTION).
 * NULL unsets the property. Keeps the monitor source's description in sync
 * and notifies hooks/subscribers when the sink is linked. */
2486 pa_sink_assert_ref(s);
2487 pa_assert_ctl_context();
/* Nothing to do if we'd unset a property that isn't set, or set an
 * identical string. */
2489 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2492 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2494 if (old && description && pa_streq(old, description))
2498 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2500 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2502 if (s->monitor_source) {
2505 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2506 pa_source_set_description(s->monitor_source, n);
2510 if (PA_SINK_IS_LINKED(s->state)) {
2511 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2512 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2516 /* Called from main thread */
2517 unsigned pa_sink_linked_by(pa_sink *s) {
/* Returns the number of streams attached to this sink, counting both sink
 * inputs (corked or not) and streams connected to its monitor source. */
2520 pa_sink_assert_ref(s);
2521 pa_assert_ctl_context();
2522 pa_assert(PA_SINK_IS_LINKED(s->state));
2524 ret = pa_idxset_size(s->inputs);
2526 /* We add in the number of streams connected to us here. Please
2527 * note the asymmetry to pa_sink_used_by()! */
2529 if (s->monitor_source)
2530 ret += pa_source_linked_by(s->monitor_source);
2535 /* Called from main thread */
2536 unsigned pa_sink_used_by(pa_sink *s) {
/* Returns the number of actively playing (i.e. non-corked) sink inputs.
 * Unlike pa_sink_linked_by(), monitor-source streams are deliberately
 * excluded. */
2539 pa_sink_assert_ref(s);
2540 pa_assert_ctl_context();
2541 pa_assert(PA_SINK_IS_LINKED(s->state));
2543 ret = pa_idxset_size(s->inputs);
2544 pa_assert(ret >= s->n_corked);
2546 /* Streams connected to our monitor source do not matter for
2547 * pa_sink_used_by()!.*/
2549 return ret - s->n_corked;
2552 /* Called from main thread */
2553 unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
/* Counts the streams that should keep this sink from being auto-suspended:
 * linked, non-corked sink inputs that don't carry DONT_INHIBIT_AUTO_SUSPEND,
 * plus the corresponding count for the monitor source. The ignore_* streams
 * are excluded, which lets move logic ask "what if this stream were gone?". */
2558 pa_sink_assert_ref(s);
2559 pa_assert_ctl_context();
2561 if (!PA_SINK_IS_LINKED(s->state))
2566 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2567 pa_sink_input_state_t st;
2569 if (i == ignore_input)
2572 st = pa_sink_input_get_state(i);
2574 /* We do not assert here. It is perfectly valid for a sink input to
2575 * be in the INIT state (i.e. created, marked done but not yet put)
2576 * and we should not care if it's unlinked as it won't contribute
2577 * towards our busy status.
2579 if (!PA_SINK_INPUT_IS_LINKED(st))
2582 if (st == PA_SINK_INPUT_CORKED)
2585 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2591 if (s->monitor_source)
2592 ret += pa_source_check_suspend(s->monitor_source, ignore_output);
2597 const char *pa_sink_state_to_string(pa_sink_state_t state) {
2599 case PA_SINK_INIT: return "INIT";
2600 case PA_SINK_IDLE: return "IDLE";
2601 case PA_SINK_RUNNING: return "RUNNING";
2602 case PA_SINK_SUSPENDED: return "SUSPENDED";
2603 case PA_SINK_UNLINKED: return "UNLINKED";
2604 case PA_SINK_INVALID_STATE: return "INVALID_STATE";
2607 pa_assert_not_reached();
2610 /* Called from the IO thread */
2611 static void sync_input_volumes_within_thread(pa_sink *s) {
/* Copies each attached input's main-thread soft volume into its thread_info
 * shadow and requests a full rewind so the new volume takes effect on data
 * already mixed into the sink buffer. Unchanged inputs are skipped. */
2615 pa_sink_assert_ref(s);
2616 pa_sink_assert_io_context(s);
2618 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2619 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2622 i->thread_info.soft_volume = i->soft_volume;
2623 pa_sink_input_request_rewind(i, 0, true, false, false);
2627 /* Called from the IO thread. Only called for the root sink in volume sharing
2628 * cases, except for internal recursive calls. */
2629 static void set_shared_volume_within_thread(pa_sink *s) {
/* Applies the pending volume state to this sink synchronously (via the
 * SET_VOLUME_SYNCED message handled in pa_sink_process_msg()) and then
 * recurses into every volume-sharing filter sink stacked on our inputs. */
2630 pa_sink_input *i = NULL;
2633 pa_sink_assert_ref(s);
2635 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2637 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2638 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2639 set_shared_volume_within_thread(i->origin_sink);
2643 /* Called from IO thread, except when it is not */
2644 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
/* Default message handler for all pa_sink objects. Sink implementations
 * chain up to this for any message code they don't handle themselves. Most
 * messages run in the IO thread; UPDATE_VOLUME_AND_MUTE is the documented
 * exception and executes in the main thread. Returns 0 on success
 * (NOTE(review): terminal "return 0;" / "return -1;" lines for several
 * cases are elided in this listing). */
2645 pa_sink *s = PA_SINK(o);
2646 pa_sink_assert_ref(s);
2648 switch ((pa_sink_message_t) code) {
2650 case PA_SINK_MESSAGE_ADD_INPUT: {
2651 pa_sink_input *i = PA_SINK_INPUT(userdata);
2653 /* If you change anything here, make sure to change the
2654 * sink input handling a few lines down at
2655 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2657 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2659 /* Since the caller sleeps in pa_sink_input_put(), we can
2660 * safely access data outside of thread_info even though
2663 if ((i->thread_info.sync_prev = i->sync_prev)) {
2664 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2665 pa_assert(i->sync_prev->sync_next == i);
2666 i->thread_info.sync_prev->thread_info.sync_next = i;
2669 if ((i->thread_info.sync_next = i->sync_next)) {
2670 pa_assert(i->sink == i->thread_info.sync_next->sink);
2671 pa_assert(i->sync_next->sync_prev == i);
2672 i->thread_info.sync_next->thread_info.sync_prev = i;
2675 pa_sink_input_attach(i);
2677 pa_sink_input_set_state_within_thread(i, i->state);
2679 /* The requested latency of the sink input needs to be fixed up and
2680 * then configured on the sink. If this causes the sink latency to
2681 * go down, the sink implementor is responsible for doing a rewind
2682 * in the update_requested_latency() callback to ensure that the
2683 * sink buffer doesn't contain more data than what the new latency
2686 * XXX: Does it really make sense to push this responsibility to
2687 * the sink implementors? Wouldn't it be better to do it once in
2688 * the core than many times in the modules? */
2690 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2691 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2693 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2694 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2696 /* We don't rewind here automatically. This is left to the
2697 * sink input implementor because some sink inputs need a
2698 * slow start, i.e. need some time to buffer client
2699 * samples before beginning streaming.
2701 * XXX: Does it really make sense to push this functionality to
2702 * the sink implementors? Wouldn't it be better to do it once in
2703 * the core than many times in the modules? */
2705 /* In flat volume mode we need to update the volume as
2707 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2710 case PA_SINK_MESSAGE_REMOVE_INPUT: {
2711 pa_sink_input *i = PA_SINK_INPUT(userdata);
2713 /* If you change anything here, make sure to change the
2714 * sink input handling a few lines down at
2715 * PA_SINK_MESSAGE_START_MOVE, too. */
2717 pa_sink_input_detach(i);
2719 pa_sink_input_set_state_within_thread(i, i->state);
2721 /* Since the caller sleeps in pa_sink_input_unlink(),
2722 * we can safely access data outside of thread_info even
2723 * though it is mutable */
2725 pa_assert(!i->sync_prev);
2726 pa_assert(!i->sync_next);
/* Unchain this input from the synchronized-stream list shadow copies. */
2728 if (i->thread_info.sync_prev) {
2729 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2730 i->thread_info.sync_prev = NULL;
2733 if (i->thread_info.sync_next) {
2734 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2735 i->thread_info.sync_next = NULL;
2738 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2739 pa_sink_invalidate_requested_latency(s, true);
2740 pa_sink_request_rewind(s, (size_t) -1);
2742 /* In flat volume mode we need to update the volume as
2744 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2747 case PA_SINK_MESSAGE_START_MOVE: {
2748 pa_sink_input *i = PA_SINK_INPUT(userdata);
2750 /* We don't support moving synchronized streams. */
2751 pa_assert(!i->sync_prev);
2752 pa_assert(!i->sync_next);
2753 pa_assert(!i->thread_info.sync_next);
2754 pa_assert(!i->thread_info.sync_prev);
2756 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2758 size_t sink_nbytes, total_nbytes;
2760 /* The old sink probably has some audio from this
2761 * stream in its buffer. We want to "take it back" as
2762 * much as possible and play it to the new sink. We
2763 * don't know at this point how much the old sink can
2764 * rewind. We have to pick something, and that
2765 * something is the full latency of the old sink here.
2766 * So we rewind the stream buffer by the sink latency
2767 * amount, which may be more than what we should
2768 * rewind. This can result in a chunk of audio being
2769 * played both to the old sink and the new sink.
2771 * FIXME: Fix this code so that we don't have to make
2772 * guesses about how much the sink will actually be
2773 * able to rewind. If someone comes up with a solution
2774 * for this, something to note is that the part of the
2775 * latency that the old sink couldn't rewind should
2776 * ideally be compensated after the stream has moved
2777 * to the new sink by adding silence. The new sink
2778 * most likely can't start playing the moved stream
2779 * immediately, and that gap should be removed from
2780 * the "compensation silence" (at least at the time of
2781 * writing this, the move finish code will actually
2782 * already take care of dropping the new sink's
2783 * unrewindable latency, so taking into account the
2784 * unrewindable latency of the old sink is the only
2787 * The render_memblockq contents are discarded,
2788 * because when the sink changes, the format of the
2789 * audio stored in the render_memblockq may change
2790 * too, making the stored audio invalid. FIXME:
2791 * However, the read and write indices are moved back
2792 * the same amount, so if they are not the same now,
2793 * they won't be the same after the rewind either. If
2794 * the write index of the render_memblockq is ahead of
2795 * the read index, then the render_memblockq will feed
2796 * the new sink some silence first, which it shouldn't
2797 * do. The write index should be flushed to be the
2798 * same as the read index. */
2800 /* Get the latency of the sink */
2801 usec = pa_sink_get_latency_within_thread(s, false);
2802 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2803 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
2805 if (total_nbytes > 0) {
2806 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
2807 i->thread_info.rewrite_flush = true;
2808 pa_sink_input_process_rewind(i, sink_nbytes);
2812 pa_sink_input_detach(i);
2814 /* Let's remove the sink input ...*/
2815 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2817 pa_sink_invalidate_requested_latency(s, true);
2819 pa_log_debug("Requesting rewind due to started move");
2820 pa_sink_request_rewind(s, (size_t) -1);
2822 /* In flat volume mode we need to update the volume as
2824 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2827 case PA_SINK_MESSAGE_FINISH_MOVE: {
2828 pa_sink_input *i = PA_SINK_INPUT(userdata);
2830 /* We don't support moving synchronized streams. */
2831 pa_assert(!i->sync_prev);
2832 pa_assert(!i->sync_next);
2833 pa_assert(!i->thread_info.sync_next);
2834 pa_assert(!i->thread_info.sync_prev);
2836 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2838 pa_sink_input_attach(i);
2840 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2844 /* In the ideal case the new sink would start playing
2845 * the stream immediately. That requires the sink to
2846 * be able to rewind all of its latency, which usually
2847 * isn't possible, so there will probably be some gap
2848 * before the moved stream becomes audible. We then
2849 * have two possibilities: 1) start playing the stream
2850 * from where it is now, or 2) drop the unrewindable
2851 * latency of the sink from the stream. With option 1
2852 * we won't lose any audio but the stream will have a
2853 * pause. With option 2 we may lose some audio but the
2854 * stream time will be somewhat in sync with the wall
2855 * clock. Lennart seems to have chosen option 2 (one
2856 * of the reasons might have been that option 1 is
2857 * actually much harder to implement), so we drop the
2858 * latency of the new sink from the moved stream and
2859 * hope that the sink will undo most of that in the
2862 /* Get the latency of the sink */
2863 usec = pa_sink_get_latency_within_thread(s, false);
2864 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2867 pa_sink_input_drop(i, nbytes);
2869 pa_log_debug("Requesting rewind due to finished move");
2870 pa_sink_request_rewind(s, nbytes);
2873 /* Updating the requested sink latency has to be done
2874 * after the sink rewind request, not before, because
2875 * otherwise the sink may limit the rewind amount
2878 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2879 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2881 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2882 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2884 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2887 case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
/* Re-apply shared volume starting from the root of the filter-sink chain. */
2888 pa_sink *root_sink = pa_sink_get_master(s);
2890 if (PA_LIKELY(root_sink))
2891 set_shared_volume_within_thread(root_sink);
2896 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2898 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2900 pa_sink_volume_change_push(s);
2902 /* Fall through ... */
2904 case PA_SINK_MESSAGE_SET_VOLUME:
2906 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2907 s->thread_info.soft_volume = s->soft_volume;
2908 pa_sink_request_rewind(s, (size_t) -1);
2911 /* Fall through ... */
2913 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2914 sync_input_volumes_within_thread(s);
2917 case PA_SINK_MESSAGE_GET_VOLUME:
2919 if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
2921 pa_sink_volume_change_flush(s);
2922 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2925 /* In case sink implementor reset SW volume. */
2926 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2927 s->thread_info.soft_volume = s->soft_volume;
2928 pa_sink_request_rewind(s, (size_t) -1);
2933 case PA_SINK_MESSAGE_SET_MUTE:
2935 if (s->thread_info.soft_muted != s->muted) {
2936 s->thread_info.soft_muted = s->muted;
2937 pa_sink_request_rewind(s, (size_t) -1);
2940 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2945 case PA_SINK_MESSAGE_GET_MUTE:
2947 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2948 return s->get_mute(s, userdata);
2952 case PA_SINK_MESSAGE_SET_STATE: {
/* Notify inputs when we cross the suspended/opened boundary in either
 * direction; mere IDLE<->RUNNING transitions are not interesting here. */
2954 bool suspend_change =
2955 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2956 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2958 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2960 if (s->thread_info.state == PA_SINK_SUSPENDED) {
2961 s->thread_info.rewind_nbytes = 0;
2962 s->thread_info.rewind_requested = false;
2965 if (suspend_change) {
2969 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2970 if (i->suspend_within_thread)
2971 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2977 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2979 pa_usec_t *usec = userdata;
2980 *usec = pa_sink_get_requested_latency_within_thread(s);
2982 /* Yes, that's right, the IO thread will see -1 when no
2983 * explicit requested latency is configured, the main
2984 * thread will see max_latency */
2985 if (*usec == (pa_usec_t) -1)
2986 *usec = s->thread_info.max_latency;
2991 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2992 pa_usec_t *r = userdata;
2994 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2999 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
3000 pa_usec_t *r = userdata;
3002 r[0] = s->thread_info.min_latency;
3003 r[1] = s->thread_info.max_latency;
3008 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
3010 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
3013 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
3015 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
3018 case PA_SINK_MESSAGE_GET_MAX_REWIND:
3020 *((size_t*) userdata) = s->thread_info.max_rewind;
3023 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
3025 *((size_t*) userdata) = s->thread_info.max_request;
3028 case PA_SINK_MESSAGE_SET_MAX_REWIND:
3030 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
3033 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
3035 pa_sink_set_max_request_within_thread(s, (size_t) offset);
3038 case PA_SINK_MESSAGE_SET_PORT:
3040 pa_assert(userdata);
3042 struct sink_message_set_port *msg_data = userdata;
3043 msg_data->ret = s->set_port(s, msg_data->port);
3047 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
3048 /* This message is sent from IO-thread and handled in main thread. */
3049 pa_assert_ctl_context();
3051 /* Make sure we're not messing with main thread when no longer linked */
3052 if (!PA_SINK_IS_LINKED(s->state))
3055 pa_sink_get_volume(s, true);
3056 pa_sink_get_mute(s, true);
3059 case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
3060 s->thread_info.port_latency_offset = offset;
3063 case PA_SINK_MESSAGE_GET_LATENCY:
3064 case PA_SINK_MESSAGE_MAX:
3071 /* Called from main thread */
3072 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
/* Suspends or resumes every sink in the core for the given (non-zero)
 * suspend cause. NOTE(review): return-value aggregation across sinks is
 * elided in this listing — failures from individual sinks appear to be
 * collected rather than aborting the loop. */
3077 pa_core_assert_ref(c);
3078 pa_assert_ctl_context();
3079 pa_assert(cause != 0);
3081 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
3084 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
3091 /* Called from IO thread */
3092 void pa_sink_detach_within_thread(pa_sink *s) {
/* Detaches every attached sink input and the monitor source from the IO
 * thread, e.g. ahead of tearing down or re-configuring the thread. */
3096 pa_sink_assert_ref(s);
3097 pa_sink_assert_io_context(s);
3098 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3100 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3101 pa_sink_input_detach(i);
3103 if (s->monitor_source)
3104 pa_source_detach_within_thread(s->monitor_source);
3107 /* Called from IO thread */
3108 void pa_sink_attach_within_thread(pa_sink *s) {
/* Counterpart of pa_sink_detach_within_thread(): re-attaches all sink
 * inputs and the monitor source to the IO thread. */
3112 pa_sink_assert_ref(s);
3113 pa_sink_assert_io_context(s);
3114 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3116 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3117 pa_sink_input_attach(i);
3119 if (s->monitor_source)
3120 pa_source_attach_within_thread(s->monitor_source);
3123 /* Called from IO thread */
3124 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
/* Records a rewind request of up to 'nbytes' bytes ((size_t)-1 means "as
 * much as possible") and notifies the implementor. Requests are clamped to
 * max_rewind; a smaller request than one already pending is ignored. */
3125 pa_sink_assert_ref(s);
3126 pa_sink_assert_io_context(s);
3127 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3129 if (nbytes == (size_t) -1)
3130 nbytes = s->thread_info.max_rewind;
3132 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
3134 if (s->thread_info.rewind_requested &&
3135 nbytes <= s->thread_info.rewind_nbytes)
3138 s->thread_info.rewind_nbytes = nbytes;
3139 s->thread_info.rewind_requested = true;
3141 if (s->request_rewind)
3142 s->request_rewind(s);
3145 /* Called from IO thread */
3146 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
/* Computes the effective requested latency of the sink: the smallest
 * latency requested by any attached sink input or by the monitor source,
 * clamped to [min_latency, max_latency]. (pa_usec_t)-1 means "nothing
 * requested". The result is cached once the sink is linked. */
3147 pa_usec_t result = (pa_usec_t) -1;
3150 pa_usec_t monitor_latency;
3152 pa_sink_assert_ref(s);
3153 pa_sink_assert_io_context(s);
/* Sinks without dynamic latency always report their (clamped) fixed
 * latency. */
3155 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3156 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
3158 if (s->thread_info.requested_latency_valid)
3159 return s->thread_info.requested_latency;
3161 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3162 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3163 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3164 result = i->thread_info.requested_sink_latency;
3166 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3168 if (monitor_latency != (pa_usec_t) -1 &&
3169 (result == (pa_usec_t) -1 || result > monitor_latency))
3170 result = monitor_latency;
3172 if (result != (pa_usec_t) -1)
3173 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3175 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3176 /* Only cache if properly initialized */
3177 s->thread_info.requested_latency = result;
3178 s->thread_info.requested_latency_valid = true;
3184 /* Called from main thread */
3185 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
/* Main-thread accessor for the requested latency: queries the IO thread
 * synchronously. Note the GET_REQUESTED_LATENCY handler converts "nothing
 * requested" ((pa_usec_t)-1) into max_latency for main-thread callers.
 * NOTE(review): the suspended-sink shortcut's returned value is elided in
 * this listing. */
3188 pa_sink_assert_ref(s);
3189 pa_assert_ctl_context();
3190 pa_assert(PA_SINK_IS_LINKED(s->state));
3192 if (s->state == PA_SINK_SUSPENDED)
3195 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3200 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3201 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
/* Updates the sink's maximum rewindable amount and forwards the new value
 * to every attached input (when linked) and to the monitor source. No-op
 * when the value is unchanged. */
3205 pa_sink_assert_ref(s);
3206 pa_sink_assert_io_context(s);
3208 if (max_rewind == s->thread_info.max_rewind)
3211 s->thread_info.max_rewind = max_rewind;
3213 if (PA_SINK_IS_LINKED(s->thread_info.state))
3214 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3215 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3217 if (s->monitor_source)
3218 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3221 /* Called from main thread */
3222 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3223 pa_sink_assert_ref(s);
3224 pa_assert_ctl_context();
3226 if (PA_SINK_IS_LINKED(s->state))
3227 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3229 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3232 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3233 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
/* Updates the maximum amount of data the sink may request at once and
 * forwards the new value to all attached inputs when linked. No-op when
 * unchanged. */
3236 pa_sink_assert_ref(s);
3237 pa_sink_assert_io_context(s);
3239 if (max_request == s->thread_info.max_request)
3242 s->thread_info.max_request = max_request;
3244 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3247 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3248 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3252 /* Called from main thread */
3253 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3254 pa_sink_assert_ref(s);
3255 pa_assert_ctl_context();
3257 if (PA_SINK_IS_LINKED(s->state))
3258 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3260 pa_sink_set_max_request_within_thread(s, max_request);
3263 /* Called from IO thread */
3264 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
/* Drops the cached requested-latency value (dynamic-latency sinks only) and
 * notifies the sink implementor plus all inputs that the requested latency
 * needs recomputation. */
3268 pa_sink_assert_ref(s);
3269 pa_sink_assert_io_context(s);
3271 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3272 s->thread_info.requested_latency_valid = false;
3276 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3278 if (s->update_requested_latency)
3279 s->update_requested_latency(s);
3281 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3282 if (i->update_sink_requested_latency)
3283 i->update_sink_requested_latency(i);
3287 /* Called from main thread */
3288 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
/* Sets the sink's supported latency range, clamping both ends into the
 * global ABSOLUTE_MIN/MAX_LATENCY bounds (0 means "no limit"). Routes the
 * update through the IO thread once the sink is linked. */
3289 pa_sink_assert_ref(s);
3290 pa_assert_ctl_context();
3292 /* min_latency == 0: no limit
3293 * min_latency anything else: specified limit
3295 * Similar for max_latency */
3297 if (min_latency < ABSOLUTE_MIN_LATENCY)
3298 min_latency = ABSOLUTE_MIN_LATENCY;
3300 if (max_latency <= 0 ||
3301 max_latency > ABSOLUTE_MAX_LATENCY)
3302 max_latency = ABSOLUTE_MAX_LATENCY;
3304 pa_assert(min_latency <= max_latency);
3306 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3307 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3308 max_latency == ABSOLUTE_MAX_LATENCY) ||
3309 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3311 if (PA_SINK_IS_LINKED(s->state)) {
3317 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3319 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3322 /* Called from main thread */
3323 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
/* Reads the current latency range into the two out-parameters: through a
 * synchronous IO-thread message when linked, directly from thread_info
 * otherwise (no IO thread running yet). */
3324 pa_sink_assert_ref(s);
3325 pa_assert_ctl_context();
3326 pa_assert(min_latency);
3327 pa_assert(max_latency);
3329 if (PA_SINK_IS_LINKED(s->state)) {
3330 pa_usec_t r[2] = { 0, 0 };
3332 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3334 *min_latency = r[0];
3335 *max_latency = r[1];
3337 *min_latency = s->thread_info.min_latency;
3338 *max_latency = s->thread_info.max_latency;
3342 /* Called from IO thread */
3343 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
/* IO-thread side of pa_sink_set_latency_range(): stores the (already
 * validated) range, notifies inputs, invalidates the cached requested
 * latency and mirrors the range onto the monitor source. */
3344 pa_sink_assert_ref(s);
3345 pa_sink_assert_io_context(s);
3347 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3348 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3349 pa_assert(min_latency <= max_latency);
3351 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3352 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3353 max_latency == ABSOLUTE_MAX_LATENCY) ||
3354 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3356 if (s->thread_info.min_latency == min_latency &&
3357 s->thread_info.max_latency == max_latency)
3360 s->thread_info.min_latency = min_latency;
3361 s->thread_info.max_latency = max_latency;
3363 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3367 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3368 if (i->update_sink_latency_range)
3369 i->update_sink_latency_range(i);
3372 pa_sink_invalidate_requested_latency(s, false);
3374 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3377 /* Called from main thread */
3378 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3379 pa_sink_assert_ref(s);
3380 pa_assert_ctl_context();
3382 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3383 pa_assert(latency == 0);
3387 if (latency < ABSOLUTE_MIN_LATENCY)
3388 latency = ABSOLUTE_MIN_LATENCY;
3390 if (latency > ABSOLUTE_MAX_LATENCY)
3391 latency = ABSOLUTE_MAX_LATENCY;
3393 if (PA_SINK_IS_LINKED(s->state))
3394 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3396 s->thread_info.fixed_latency = latency;
3398 pa_source_set_fixed_latency(s->monitor_source, latency);
3401 /* Called from main thread */
3402 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3405 pa_sink_assert_ref(s);
3406 pa_assert_ctl_context();
3408 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3411 if (PA_SINK_IS_LINKED(s->state))
3412 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3414 latency = s->thread_info.fixed_latency;
3419 /* Called from IO thread */
3420 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3421 pa_sink_assert_ref(s);
3422 pa_sink_assert_io_context(s);
3424 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3425 pa_assert(latency == 0);
3426 s->thread_info.fixed_latency = 0;
3428 if (s->monitor_source)
3429 pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
3434 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3435 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3437 if (s->thread_info.fixed_latency == latency)
3440 s->thread_info.fixed_latency = latency;
3442 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3446 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3447 if (i->update_sink_fixed_latency)
3448 i->update_sink_fixed_latency(i);
3451 pa_sink_invalidate_requested_latency(s, false);
3453 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3456 /* Called from main context */
3457 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3458 pa_sink_assert_ref(s);
3460 s->port_latency_offset = offset;
3462 if (PA_SINK_IS_LINKED(s->state))
3463 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3465 s->thread_info.port_latency_offset = offset;
3467 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3470 /* Called from main context */
3471 size_t pa_sink_get_max_rewind(pa_sink *s) {
3473 pa_assert_ctl_context();
3474 pa_sink_assert_ref(s);
3476 if (!PA_SINK_IS_LINKED(s->state))
3477 return s->thread_info.max_rewind;
3479 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3484 /* Called from main context */
3485 size_t pa_sink_get_max_request(pa_sink *s) {
3487 pa_sink_assert_ref(s);
3488 pa_assert_ctl_context();
3490 if (!PA_SINK_IS_LINKED(s->state))
3491 return s->thread_info.max_request;
3493 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3498 /* Called from main context */
3499 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3500 pa_device_port *port;
3503 pa_sink_assert_ref(s);
3504 pa_assert_ctl_context();
3507 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3508 return -PA_ERR_NOTIMPLEMENTED;
3512 return -PA_ERR_NOENTITY;
3514 if (!(port = pa_hashmap_get(s->ports, name)))
3515 return -PA_ERR_NOENTITY;
3517 if (s->active_port == port) {
3518 s->save_port = s->save_port || save;
3522 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
3523 struct sink_message_set_port msg = { .port = port, .ret = 0 };
3524 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
3528 ret = s->set_port(s, port);
3531 return -PA_ERR_NOENTITY;
3533 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3535 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3537 s->active_port = port;
3538 s->save_port = save;
3540 pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3542 /* The active port affects the default sink selection. */
3543 pa_core_update_default_sink(s->core);
3545 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
3550 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3551 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3555 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3558 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3560 if (pa_streq(ff, "microphone"))
3561 t = "audio-input-microphone";
3562 else if (pa_streq(ff, "webcam"))
3564 else if (pa_streq(ff, "computer"))
3566 else if (pa_streq(ff, "handset"))
3568 else if (pa_streq(ff, "portable"))
3569 t = "multimedia-player";
3570 else if (pa_streq(ff, "tv"))
3571 t = "video-display";
3574 * The following icons are not part of the icon naming spec,
3575 * because Rodney Dawes sucks as the maintainer of that spec.
3577 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3579 else if (pa_streq(ff, "headset"))
3580 t = "audio-headset";
3581 else if (pa_streq(ff, "headphone"))
3582 t = "audio-headphones";
3583 else if (pa_streq(ff, "speaker"))
3584 t = "audio-speakers";
3585 else if (pa_streq(ff, "hands-free"))
3586 t = "audio-handsfree";
3590 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3591 if (pa_streq(c, "modem"))
3598 t = "audio-input-microphone";
3601 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3602 if (strstr(profile, "analog"))
3604 else if (strstr(profile, "iec958"))
3606 else if (strstr(profile, "hdmi"))
3610 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3612 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3617 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3618 const char *s, *d = NULL, *k;
3621 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3625 if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3629 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3630 if (pa_streq(s, "internal"))
3631 d = _("Built-in Audio");
3634 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3635 if (pa_streq(s, "modem"))
3639 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3644 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3647 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3649 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3654 bool pa_device_init_intended_roles(pa_proplist *p) {
3658 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3661 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3662 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3663 || pa_streq(s, "headset")) {
3664 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3671 unsigned pa_device_init_priority(pa_proplist *p) {
3673 unsigned priority = 0;
3677 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3679 if (pa_streq(s, "sound"))
3681 else if (!pa_streq(s, "modem"))
3685 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3687 if (pa_streq(s, "headphone"))
3689 else if (pa_streq(s, "hifi"))
3691 else if (pa_streq(s, "speaker"))
3693 else if (pa_streq(s, "portable"))
3697 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3699 if (pa_streq(s, "bluetooth"))
3701 else if (pa_streq(s, "usb"))
3703 else if (pa_streq(s, "pci"))
3707 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3709 if (pa_startswith(s, "analog-"))
3711 else if (pa_startswith(s, "iec958-"))
3718 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3720 /* Called from the IO thread. */
3721 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3722 pa_sink_volume_change *c;
3723 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3724 c = pa_xnew(pa_sink_volume_change, 1);
3726 PA_LLIST_INIT(pa_sink_volume_change, c);
3728 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3732 /* Called from the IO thread. */
3733 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3735 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3739 /* Called from the IO thread. */
3740 void pa_sink_volume_change_push(pa_sink *s) {
3741 pa_sink_volume_change *c = NULL;
3742 pa_sink_volume_change *nc = NULL;
3743 pa_sink_volume_change *pc = NULL;
3744 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3746 const char *direction = NULL;
3749 nc = pa_sink_volume_change_new(s);
3751 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3752 * Adding one more volume for HW would get us rid of this, but I am trying
3753 * to survive with the ones we already have. */
3754 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
3756 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3757 pa_log_debug("Volume not changing");
3758 pa_sink_volume_change_free(nc);
3762 nc->at = pa_sink_get_latency_within_thread(s, false);
3763 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3765 if (s->thread_info.volume_changes_tail) {
3766 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3767 /* If volume is going up let's do it a bit late. If it is going
3768 * down let's do it a bit early. */
3769 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3770 if (nc->at + safety_margin > c->at) {
3771 nc->at += safety_margin;
3776 else if (nc->at - safety_margin > c->at) {
3777 nc->at -= safety_margin;
3785 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3786 nc->at += safety_margin;
3789 nc->at -= safety_margin;
3792 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3795 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3798 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3800 /* We can ignore volume events that came earlier but should happen later than this. */
3801 PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
3802 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3803 pa_sink_volume_change_free(c);
3806 s->thread_info.volume_changes_tail = nc;
3809 /* Called from the IO thread. */
3810 static void pa_sink_volume_change_flush(pa_sink *s) {
3811 pa_sink_volume_change *c = s->thread_info.volume_changes;
3813 s->thread_info.volume_changes = NULL;
3814 s->thread_info.volume_changes_tail = NULL;
3816 pa_sink_volume_change *next = c->next;
3817 pa_sink_volume_change_free(c);
3822 /* Called from the IO thread. */
3823 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3829 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3835 pa_assert(s->write_volume);
3837 now = pa_rtclock_now();
3839 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3840 pa_sink_volume_change *c = s->thread_info.volume_changes;
3841 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3842 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3843 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3845 s->thread_info.current_hw_volume = c->hw_volume;
3846 pa_sink_volume_change_free(c);
3852 if (s->thread_info.volume_changes) {
3854 *usec_to_next = s->thread_info.volume_changes->at - now;
3855 if (pa_log_ratelimit(PA_LOG_DEBUG))
3856 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3861 s->thread_info.volume_changes_tail = NULL;
3866 /* Called from the IO thread. */
3867 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3868 /* All the queued volume events later than current latency are shifted to happen earlier. */
3869 pa_sink_volume_change *c;
3870 pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3871 pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3872 pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
3874 pa_log_debug("latency = %lld", (long long) limit);
3875 limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3877 PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3878 pa_usec_t modified_limit = limit;
3879 if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3880 modified_limit -= s->thread_info.volume_change_safety_margin;
3882 modified_limit += s->thread_info.volume_change_safety_margin;
3883 if (c->at > modified_limit) {
3885 if (c->at < modified_limit)
3886 c->at = modified_limit;
3888 prev_vol = pa_cvolume_avg(&c->hw_volume);
3890 pa_sink_volume_change_apply(s, NULL);
3893 /* Called from the main thread */
3894 /* Gets the list of formats supported by the sink. The members and idxset must
3895 * be freed by the caller. */
3896 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3901 if (s->get_formats) {
3902 /* Sink supports format query, all is good */
3903 ret = s->get_formats(s);
3905 /* Sink doesn't support format query, so assume it does PCM */
3906 pa_format_info *f = pa_format_info_new();
3907 f->encoding = PA_ENCODING_PCM;
3909 ret = pa_idxset_new(NULL, NULL);
3910 pa_idxset_put(ret, f, NULL);
3916 /* Called from the main thread */
3917 /* Allows an external source to set what formats a sink supports if the sink
3918 * permits this. The function makes a copy of the formats on success. */
3919 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3924 /* Sink supports setting formats -- let's give it a shot */
3925 return s->set_formats(s, formats);
3927 /* Sink doesn't support setting this -- bail out */
3931 /* Called from the main thread */
3932 /* Checks if the sink can accept this format */
3933 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3934 pa_idxset *formats = NULL;
3940 formats = pa_sink_get_formats(s);
3943 pa_format_info *finfo_device;
3946 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3947 if (pa_format_info_is_compatible(finfo_device, f)) {
3953 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3959 /* Called from the main thread */
3960 /* Calculates the intersection between formats supported by the sink and
3961 * in_formats, and returns these, in the order of the sink's formats. */
3962 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3963 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3964 pa_format_info *f_sink, *f_in;
3969 if (!in_formats || pa_idxset_isempty(in_formats))
3972 sink_formats = pa_sink_get_formats(s);
3974 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3975 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3976 if (pa_format_info_is_compatible(f_sink, f_in))
3977 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3983 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3988 /* Called from the main thread. */
3989 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
3990 pa_cvolume old_volume;
3991 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3992 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3997 old_volume = s->reference_volume;
3999 if (pa_cvolume_equal(volume, &old_volume))
4002 s->reference_volume = *volume;
4003 pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
4004 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
4005 s->flags & PA_SINK_DECIBEL_VOLUME),
4006 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
4007 s->flags & PA_SINK_DECIBEL_VOLUME));
4009 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4010 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);