2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
32 #include <pulse/introspect.h>
33 #include <pulse/format.h>
34 #include <pulse/utf8.h>
35 #include <pulse/xmalloc.h>
36 #include <pulse/timeval.h>
37 #include <pulse/util.h>
38 #include <pulse/rtclock.h>
39 #include <pulse/internal.h>
41 #include <pulsecore/i18n.h>
42 #include <pulsecore/sink-input.h>
43 #include <pulsecore/namereg.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/mix.h>
47 #include <pulsecore/core-subscribe.h>
48 #include <pulsecore/log.h>
49 #include <pulsecore/macro.h>
50 #include <pulsecore/play-memblockq.h>
51 #include <pulsecore/flist.h>
55 #define MAX_MIX_CHANNELS 32
56 #define MIX_BUFFER_LENGTH (pa_page_size())
57 #define ABSOLUTE_MIN_LATENCY (500)
58 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
59 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
61 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
63 struct pa_sink_volume_change {
67 PA_LLIST_FIELDS(pa_sink_volume_change);
70 struct sink_message_set_port {
75 static void sink_free(pa_object *s);
77 static void pa_sink_volume_change_push(pa_sink *s);
78 static void pa_sink_volume_change_flush(pa_sink *s);
79 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/* Tizen-specific PCM debug dump: lazily opens a raw-audio dump file when the
 * core's PA_PCM_DUMP_PA_SINK flag is set and the sink is RUNNING, closes it
 * when the flag is cleared, and appends every rendered chunk to it.
 * NOTE(review): this extraction is missing lines (closing braces, some else
 * branches, local declarations such as 'now', 'tm', 'datetime', 'ptr') —
 * comments below describe only what is visible. */
82 static void pa_sink_write_pcm_dump(pa_sink *s, pa_memchunk *chunk)
84 char *dump_time = NULL, *dump_path_surfix = NULL;
85 const char *s_device_api_str, *card_name_str, *device_idx_str;
90 /* open file for dump pcm */
91 if (s->core->pcm_dump & PA_PCM_DUMP_PA_SINK && !s->pcm_dump_fp && s->state == PA_SINK_RUNNING) {
/* Build an HHMMSS.mmm timestamp for the dump file name. */
92 pa_gettimeofday(&now);
93 localtime_r(&now.tv_sec, &tm);
94 memset(&datetime[0], 0x00, sizeof(datetime));
95 strftime(&datetime[0], sizeof(datetime), "%H%M%S", &tm);
96 dump_time = pa_sprintf_malloc("%s.%03ld", &datetime[0], now.tv_usec / 1000);
/* Derive a file-name suffix from the device API: for ALSA sinks use
 * "<card_name>.<device_index>", otherwise fall back to the API string
 * or the sink name. */
98 if ((s_device_api_str = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_API))) {
99 if (pa_streq(s_device_api_str, "alsa")) {
100 card_name_str = pa_proplist_gets(s->proplist, "alsa.card_name");
101 device_idx_str = pa_proplist_gets(s->proplist, "alsa.device");
102 dump_path_surfix = pa_sprintf_malloc("%s.%s", pa_strnull(card_name_str), pa_strnull(device_idx_str));
104 dump_path_surfix = pa_sprintf_malloc("%s", s_device_api_str);
107 dump_path_surfix = pa_sprintf_malloc("%s", s->name);
/* Full path encodes timestamp, sink index, suffix, channel count and rate. */
110 s->dump_path = pa_sprintf_malloc("%s_%s_pa-sink%d-%s_%dch_%d.raw", PA_PCM_DUMP_PATH_PREFIX, pa_strempty(dump_time),
111 s->index, pa_strempty(dump_path_surfix), s->sample_spec.channels, s->sample_spec.rate);
113 s->pcm_dump_fp = fopen(s->dump_path, "w");
115 pa_log_warn("%s open failed", s->dump_path);
117 pa_log_info("%s opened", s->dump_path);
/* NOTE(review): dump_time appears to be freed in a line not visible here —
 * confirm against the full source; dump_path_surfix is freed below. */
120 pa_xfree(dump_path_surfix);
121 /* close file for dump pcm when config is changed */
122 } else if (~s->core->pcm_dump & PA_PCM_DUMP_PA_SINK && s->pcm_dump_fp) {
123 fclose(s->pcm_dump_fp);
124 pa_log_info("%s closed", s->dump_path);
125 pa_xfree(s->dump_path);
126 s->pcm_dump_fp = NULL;
/* If a dump file is open, append this chunk's payload bytes. */
130 if (s->pcm_dump_fp) {
133 ptr = pa_memblock_acquire(chunk->memblock);
135 fwrite((uint8_t *)ptr + chunk->index, 1, chunk->length, s->pcm_dump_fp);
137 pa_log_warn("pa_memblock_acquire is failed. ptr is NULL");
139 pa_memblock_release(chunk->memblock);
/* Initialize a pa_sink_new_data staging structure: allocates a fresh property
 * list and an empty port hashmap (keyed by port name, unreffing ports on
 * removal). NOTE(review): the zeroing of *data and the return statement are
 * not visible in this extraction — presumably present; confirm upstream. */
144 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
148 data->proplist = pa_proplist_new();
149 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
/* Set (replace) the proposed sink name in the staging data; frees any
 * previously set name and stores a private copy of 'name'. */
154 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
157 pa_xfree(data->name);
158 data->name = pa_xstrdup(name);
/* Set the proposed sample spec. Passing NULL marks the spec as unset;
 * the copy only happens when 'spec' is non-NULL. */
161 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
164 if ((data->sample_spec_is_set = !!spec))
165 data->sample_spec = *spec;
/* Set the proposed channel map. Passing NULL marks the map as unset;
 * the copy only happens when 'map' is non-NULL. */
168 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
171 if ((data->channel_map_is_set = !!map))
172 data->channel_map = *map;
/* Set the alternate sample rate the sink may switch to; unconditionally
 * marks the field as set. */
175 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
178 data->alternate_sample_rate_is_set = true;
179 data->alternate_sample_rate = alternate_sample_rate;
/* Set the proposed initial volume. Passing NULL marks the volume as unset;
 * the copy only happens when 'volume' is non-NULL. */
182 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
185 if ((data->volume_is_set = !!volume))
186 data->volume = *volume;
/* Set the proposed initial mute state and mark it as explicitly set.
 * NOTE(review): the 'data->muted = mute;' assignment is not visible in this
 * extraction — presumably on the missing line; confirm upstream. */
189 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
192 data->muted_is_set = true;
/* Set (replace) the proposed active port name; frees any previous value and
 * stores a private copy of 'port'. */
196 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
199 pa_xfree(data->active_port);
200 data->active_port = pa_xstrdup(port);
/* Release everything owned by the staging structure: property list, port
 * hashmap (which unrefs the ports), name and active-port strings.
 * NOTE(review): NULL guards around proplist/ports appear to be on lines
 * missing from this extraction. */
203 void pa_sink_new_data_done(pa_sink_new_data *data) {
206 pa_proplist_free(data->proplist);
209 pa_hashmap_free(data->ports);
211 pa_xfree(data->name);
212 pa_xfree(data->active_port);
215 /* Called from main context */
/* Clear every implementor-supplied callback on the sink, returning it to a
 * pristine state (used during construction/teardown). NOTE(review): some
 * callback fields (set_mute, get_mute, set_state, set_port, ...) are on
 * lines missing from this extraction. */
216 static void reset_callbacks(pa_sink *s) {
220 s->get_volume = NULL;
221 s->set_volume = NULL;
222 s->write_volume = NULL;
225 s->request_rewind = NULL;
226 s->update_requested_latency = NULL;
228 s->get_formats = NULL;
229 s->set_formats = NULL;
230 s->reconfigure = NULL;
233 /* Called from main context */
/* Allocate and partially initialize a new pa_sink from the staging data.
 * Registers the name, fires the NEW and FIXATE hooks, validates/defaults the
 * sample spec, channel map, volume and port, initializes both the main-thread
 * and thread_info state, and creates the paired ".monitor" source.
 * The sink is left in PA_SINK_INIT state; pa_sink_put() completes setup.
 * Returns NULL on validation/hook failure.
 * NOTE(review): this extraction is missing many lines (local declarations,
 * else branches, closing braces, error-path returns); comments describe only
 * the visible code. */
234 pa_sink* pa_sink_new(
236 pa_sink_new_data *data,
237 pa_sink_flags_t flags) {
241 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
242 pa_source_new_data source_data;
248 pa_assert(data->name);
249 pa_assert_ctl_context();
251 s = pa_msgobject_new(pa_sink);
/* Claim the sink name in the global name registry; fail if taken. */
253 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
254 pa_log_debug("Failed to register name %s.", data->name);
259 pa_sink_new_data_set_name(data, name);
/* Give modules a chance to veto/modify the sink before fixation. */
261 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
263 pa_namereg_unregister(core, name);
267 /* FIXME, need to free s here on failure */
/* Validate the (possibly hook-modified) staging data. */
269 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
270 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
272 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
274 if (!data->channel_map_is_set)
275 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
277 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
278 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
280 /* FIXME: There should probably be a general function for checking whether
281 * the sink volume is allowed to be set, like there is for sink inputs. */
282 pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* Default to a full-volume, unsaved cvolume when none was provided. */
284 if (!data->volume_is_set) {
285 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
286 data->save_volume = false;
289 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
290 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
292 if (!data->muted_is_set)
/* Inherit card properties and fill in description/icon/role defaults. */
296 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
298 pa_device_init_description(data->proplist, data->card);
299 pa_device_init_icon(data->proplist, true);
300 pa_device_init_intended_roles(data->proplist);
/* Pick the best available port if the caller didn't choose one. */
302 if (!data->active_port) {
303 pa_device_port *p = pa_device_port_find_best(data->ports);
305 pa_sink_new_data_set_port(data, p->name);
/* Last chance for modules to adjust the data before it is frozen. */
308 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
310 pa_namereg_unregister(core, name);
/* Wire up msgobject vtable and copy the fixed data into the sink. */
314 s->parent.parent.free = sink_free;
315 s->parent.process_msg = pa_sink_process_msg;
318 s->state = PA_SINK_INIT;
321 s->suspend_cause = data->suspend_cause;
322 pa_sink_set_mixer_dirty(s, false);
323 s->name = pa_xstrdup(name);
324 s->proplist = pa_proplist_copy(data->proplist);
325 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
326 s->module = data->module;
327 s->card = data->card;
329 s->priority = pa_device_init_priority(s->proplist);
331 s->sample_spec = data->sample_spec;
332 s->channel_map = data->channel_map;
333 s->default_sample_rate = s->sample_spec.rate;
335 if (data->alternate_sample_rate_is_set)
336 s->alternate_sample_rate = data->alternate_sample_rate;
338 s->alternate_sample_rate = s->core->alternate_sample_rate;
340 s->inputs = pa_idxset_new(NULL, NULL);
342 s->input_to_master = NULL;
/* Volume: reference == real == requested; soft volume starts at NORM. */
344 s->reference_volume = s->real_volume = data->volume;
345 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
346 s->base_volume = PA_VOLUME_NORM;
347 s->n_volume_steps = PA_VOLUME_NORM+1;
348 s->muted = data->muted;
349 s->refresh_volume = s->refresh_muted = false;
356 /* As a minor optimization we just steal the list instead of
358 s->ports = data->ports;
361 s->active_port = NULL;
362 s->save_port = false;
/* Resolve the requested active port against the (stolen) port map. */
364 if (data->active_port)
365 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
366 s->save_port = data->save_port;
368 /* Hopefully the active port has already been assigned in the previous call
369 to pa_device_port_find_best, but better safe than sorry */
371 s->active_port = pa_device_port_find_best(s->ports);
373 s->port_latency_offset = s->active_port->latency_offset;
374 s->port_latency_offset = s->active_port->latency_offset;
376 s->port_latency_offset = 0;
378 s->save_volume = data->save_volume;
379 s->save_muted = data->save_muted;
380 #ifdef TIZEN_PCM_DUMP
381 s->pcm_dump_fp = NULL;
385 pa_silence_memchunk_get(
386 &core->silence_cache,
/* Initialize the IO-thread-side mirror of the sink state. */
392 s->thread_info.rtpoll = NULL;
393 s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
394 (pa_free_cb_t) pa_sink_input_unref);
395 s->thread_info.soft_volume = s->soft_volume;
396 s->thread_info.soft_muted = s->muted;
397 s->thread_info.state = s->state;
398 s->thread_info.rewind_nbytes = 0;
399 s->thread_info.rewind_requested = false;
400 s->thread_info.max_rewind = 0;
401 s->thread_info.max_request = 0;
402 s->thread_info.requested_latency_valid = false;
403 s->thread_info.requested_latency = 0;
404 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
405 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
406 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
408 PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
409 s->thread_info.volume_changes_tail = NULL;
410 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
411 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
412 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
413 s->thread_info.port_latency_offset = s->port_latency_offset;
415 /* FIXME: This should probably be moved to pa_sink_put() */
416 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
419 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
421 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
422 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
425 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
426 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Create the companion monitor source that taps the sink's output. */
430 pa_source_new_data_init(&source_data);
431 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
432 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
433 pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
434 source_data.name = pa_sprintf_malloc("%s.monitor", name);
435 source_data.driver = data->driver;
436 source_data.module = data->module;
437 source_data.card = data->card;
439 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
440 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
441 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
/* The monitor source mirrors the sink's latency capability flags. */
443 s->monitor_source = pa_source_new(core, &source_data,
444 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
445 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
447 pa_source_new_data_done(&source_data);
449 if (!s->monitor_source) {
455 s->monitor_source->monitor_of = s;
457 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
458 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
459 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
464 /* Called from main context */
/* Transition the sink to 'state' with the given suspend-cause bitmask.
 * Calls the implementor's set_state() callback, forwards SET_STATE to the IO
 * thread, fires state-change hooks/subscription events, and notifies sink
 * inputs and the monitor source about suspend/resume. Returns 0 on success,
 * negative on failure (which is only permitted when resuming).
 * NOTE(review): several lines (early return, variable declarations, some
 * closing braces, the failure-rollback branch context) are missing from this
 * extraction; comments describe only the visible code. */
465 static int sink_set_state(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) {
468 bool suspend_cause_changed;
473 pa_assert_ctl_context();
475 state_changed = state != s->state;
476 suspend_cause_changed = suspend_cause != s->suspend_cause;
/* Nothing to do if neither the state nor the cause changes. */
478 if (!state_changed && !suspend_cause_changed)
481 suspending = PA_SINK_IS_OPENED(s->state) && state == PA_SINK_SUSPENDED;
482 resuming = s->state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state);
484 /* If we are resuming, suspend_cause must be 0. */
485 pa_assert(!resuming || !suspend_cause);
487 /* Here's something to think about: what to do with the suspend cause if
488 * resuming the sink fails? The old suspend cause will be incorrect, so we
489 * can't use that. On the other hand, if we set no suspend cause (as is the
490 * case currently), then it looks strange to have a sink suspended without
491 * any cause. It might be a good idea to add a new "resume failed" suspend
492 * cause, or it might just add unnecessary complexity, given that the
493 * current approach of not setting any suspend cause works well enough. */
495 if (s->set_state && state_changed) {
496 ret = s->set_state(s, state);
497 /* set_state() is allowed to fail only when resuming. */
498 pa_assert(ret >= 0 || resuming);
/* Inform the IO thread about the state change (synchronously). */
501 if (ret >= 0 && s->asyncmsgq && state_changed)
502 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
503 /* SET_STATE is allowed to fail only when resuming. */
/* Roll the implementor back to SUSPENDED if the IO thread refused. */
507 s->set_state(s, PA_SINK_SUSPENDED);
510 #ifdef TIZEN_PCM_DUMP
511 /* close file for dump pcm */
512 if (s->pcm_dump_fp && (s->core->pcm_dump_option & PA_PCM_DUMP_OPTION_SEPARATED) && suspending) {
513 fclose(s->pcm_dump_fp);
514 pa_log_info("%s closed", s->dump_path);
515 pa_xfree(s->dump_path);
516 s->pcm_dump_fp = NULL;
519 if (suspend_cause_changed) {
520 char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
521 char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
523 pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(s->suspend_cause, old_cause_buf),
524 pa_suspend_cause_to_string(suspend_cause, new_cause_buf));
525 s->suspend_cause = suspend_cause;
532 pa_log_debug("%s: state: %s -> %s", s->name, pa_sink_state_to_string(s->state), pa_sink_state_to_string(state));
535 /* If we enter UNLINKED state, then we don't send change notifications.
536 * pa_sink_unlink() will send unlink notifications instead. */
537 if (state != PA_SINK_UNLINKED) {
538 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
539 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
543 if (suspending || resuming) {
547 /* We're suspending or resuming, tell everyone about it */
549 PA_IDXSET_FOREACH(i, s->inputs, idx)
/* Kill inputs that asked to die on suspend; otherwise forward the event. */
550 if (s->state == PA_SINK_SUSPENDED &&
551 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
552 pa_sink_input_kill(i);
554 i->suspend(i, state == PA_SINK_SUSPENDED);
/* Keep the monitor source's suspend state in sync with the sink. */
558 if ((suspending || resuming || suspend_cause_changed) && s->monitor_source && state != PA_SINK_UNLINKED)
559 pa_source_sync_suspend(s->monitor_source);
/* Install the implementor callback used to query hardware volume.
 * NOTE(review): the function body is entirely missing from this extraction —
 * presumably just assigns 'cb' to s->get_volume; confirm upstream. */
564 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Install (or clear) the implementor callback that applies volume to
 * hardware, toggling PA_SINK_HW_VOLUME_CTRL accordingly and re-evaluating
 * decibel-volume support. Emits a change event if flags changed after init.
 * NOTE(review): the assignment of 'cb' and the if/else structure around the
 * flag updates are on lines missing from this extraction. */
570 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
571 pa_sink_flags_t flags;
/* write_volume requires set_volume, so cb may only be cleared together. */
574 pa_assert(!s->write_volume || cb);
578 /* Save the current flags so we can tell if they've changed */
582 /* The sink implementor is responsible for setting decibel volume support */
583 s->flags |= PA_SINK_HW_VOLUME_CTRL;
585 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
586 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
587 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
590 /* If the flags have changed after init, let any clients know via a change event */
591 if (s->state != PA_SINK_INIT && flags != s->flags)
592 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install (or clear) the deferred-volume write callback, toggling
 * PA_SINK_DEFERRED_VOLUME; requires set_volume to be present first.
 * Emits a change event if flags changed after init.
 * NOTE(review): the if/else around the flag toggle is on lines missing from
 * this extraction. */
595 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
596 pa_sink_flags_t flags;
599 pa_assert(!cb || s->set_volume);
601 s->write_volume = cb;
603 /* Save the current flags so we can tell if they've changed */
607 s->flags |= PA_SINK_DEFERRED_VOLUME;
609 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
611 /* If the flags have changed after init, let any clients know via a change event */
612 if (s->state != PA_SINK_INIT && flags != s->flags)
613 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install the implementor callback used to query hardware mute state.
 * NOTE(review): the function body is entirely missing from this extraction —
 * presumably just assigns 'cb' to s->get_mute; confirm upstream. */
616 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
/* Install (or clear) the implementor callback that applies mute to hardware,
 * toggling PA_SINK_HW_MUTE_CTRL. Emits a change event if flags changed after
 * init. NOTE(review): the assignment of 'cb' and the if/else around the flag
 * toggle are on lines missing from this extraction. */
622 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
623 pa_sink_flags_t flags;
629 /* Save the current flags so we can tell if they've changed */
633 s->flags |= PA_SINK_HW_MUTE_CTRL;
635 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
637 /* If the flags have changed after init, let any clients know via a change event */
638 if (s->state != PA_SINK_INIT && flags != s->flags)
639 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable/disable PA_SINK_FLAT_VOLUME on this sink, gated by the global
 * flat_volumes preference. Emits a change event if flags changed after init.
 * NOTE(review): the if/else around the flag toggle is on lines missing from
 * this extraction. */
642 static void enable_flat_volume(pa_sink *s, bool enable) {
643 pa_sink_flags_t flags;
647 /* Always follow the overall user preference here */
648 enable = enable && s->core->flat_volumes;
650 /* Save the current flags so we can tell if they've changed */
654 s->flags |= PA_SINK_FLAT_VOLUME;
656 s->flags &= ~PA_SINK_FLAT_VOLUME;
658 /* If the flags have changed after init, let any clients know via a change event */
659 if (s->state != PA_SINK_INIT && flags != s->flags)
660 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable/disable PA_SINK_DECIBEL_VOLUME; flat volume follows dB support, so
 * it is enabled/disabled in lockstep. Emits a change event if flags changed
 * after init. NOTE(review): the if/else around the toggle is on lines missing
 * from this extraction. */
663 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
664 pa_sink_flags_t flags;
668 /* Save the current flags so we can tell if they've changed */
672 s->flags |= PA_SINK_DECIBEL_VOLUME;
673 enable_flat_volume(s, true);
675 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
676 enable_flat_volume(s, false);
679 /* If the flags have changed after init, let any clients know via a change event */
680 if (s->state != PA_SINK_INIT && flags != s->flags)
681 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
684 /* Called from main context */
/* Complete sink initialization after the implementor has configured it:
 * validates flag/callback consistency, finalizes volume handling (decibel,
 * flat, volume sharing with a master sink), moves the sink out of INIT state
 * (to SUSPENDED or IDLE), publishes the monitor source, and announces the
 * sink via subscription event + SINK_PUT hook. */
685 void pa_sink_put(pa_sink* s) {
686 pa_sink_assert_ref(s);
687 pa_assert_ctl_context();
689 pa_assert(s->state == PA_SINK_INIT);
690 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
692 /* The following fields must be initialized properly when calling _put() */
693 pa_assert(s->asyncmsgq);
694 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
696 /* Generally, flags should be initialized via pa_sink_new(). As a
697 * special exception we allow some volume related flags to be set
698 * between _new() and _put() by the callback setter functions above.
700 * Thus we implement a couple safeguards here which ensure the above
701 * setters were used (or at least the implementor made manual changes
702 * in a compatible way).
704 * Note: All of these flags set here can change over the life time
706 pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
707 pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
708 pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
710 /* XXX: Currently decibel volume is disabled for all sinks that use volume
711 * sharing. When the master sink supports decibel volume, it would be good
712 * to have the flag also in the filter sink, but currently we don't do that
713 * so that the flags of the filter sink never change when it's moved from
714 * a master sink to another. One solution for this problem would be to
715 * remove user-visible volume altogether from filter sinks when volume
716 * sharing is used, but the current approach was easier to implement... */
717 /* We always support decibel volumes in software, otherwise we leave it to
718 * the sink implementor to set this flag as needed.
720 * Note: This flag can also change over the life time of the sink. */
721 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
722 pa_sink_enable_decibel_volume(s, true);
723 s->soft_volume = s->reference_volume;
726 /* If the sink implementor support DB volumes by itself, we should always
727 * try and enable flat volumes too */
728 if ((s->flags & PA_SINK_DECIBEL_VOLUME))
729 enable_flat_volume(s, true);
731 if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
732 pa_sink *root_sink = pa_sink_get_master(s);
734 pa_assert(root_sink);
/* Filter sinks sharing volume mirror the master's volumes, remapped to
 * this sink's channel map. */
736 s->reference_volume = root_sink->reference_volume;
737 pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
739 s->real_volume = root_sink->real_volume;
740 pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
742 /* We assume that if the sink implementor changed the default
743 * volume he did so in real_volume, because that is the usual
744 * place where he is supposed to place his changes. */
745 s->reference_volume = s->real_volume;
/* Propagate finalized volume/mute into the IO-thread mirror. */
747 s->thread_info.soft_volume = s->soft_volume;
748 s->thread_info.soft_muted = s->muted;
749 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
/* Sanity checks tying flags to latency/volume invariants. */
751 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
752 || (s->base_volume == PA_VOLUME_NORM
753 && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
754 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
755 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
756 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
757 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
759 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
760 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
761 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
/* Enter SUSPENDED if a suspend cause was carried over, IDLE otherwise. */
763 if (s->suspend_cause)
764 pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED, s->suspend_cause) == 0);
766 pa_assert_se(sink_set_state(s, PA_SINK_IDLE, 0) == 0);
768 pa_source_put(s->monitor_source);
770 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
771 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
773 /* This function must be called after the PA_CORE_HOOK_SINK_PUT hook,
774 * because module-switch-on-connect needs to know the old default sink */
775 pa_core_update_default_sink(s->core);
778 /* Called from main context */
/* Detach the sink from the core: fires UNLINK hooks, unregisters the name,
 * removes the sink from core/card idxsets, updates the default sink, kills
 * all remaining inputs, transitions to UNLINKED, and unlinks the monitor
 * source. Idempotent via the unlink_requested guard.
 * NOTE(review): several lines (early return, 'linked' conditionals, closing
 * braces) are missing from this extraction. */
779 void pa_sink_unlink(pa_sink* s) {
781 pa_sink_input *i, PA_UNUSED *j = NULL;
783 pa_sink_assert_ref(s);
784 pa_assert_ctl_context();
786 /* Please note that pa_sink_unlink() does more than simply
787 * reversing pa_sink_put(). It also undoes the registrations
788 * already done in pa_sink_new()! */
790 if (s->unlink_requested)
793 s->unlink_requested = true;
795 linked = PA_SINK_IS_LINKED(s->state);
798 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
800 if (s->state != PA_SINK_UNLINKED)
801 pa_namereg_unregister(s->core, s->name);
802 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
804 pa_core_update_default_sink(s->core);
807 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill every sink input still attached to this sink. */
809 while ((i = pa_idxset_first(s->inputs, NULL))) {
811 pa_sink_input_kill(i);
/* If linked, go through the normal state machine; otherwise force the
 * state field directly. */
816 sink_set_state(s, PA_SINK_UNLINKED, 0);
818 s->state = PA_SINK_UNLINKED;
822 if (s->monitor_source)
823 pa_source_unlink(s->monitor_source);
826 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
827 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
831 /* Called from main context */
/* Final destructor, invoked via the msgobject free vtable when the refcount
 * hits zero; the sink must already be unlinked. Releases volume-change queue,
 * monitor source, input containers, silence memblock, proplist, ports, and
 * the Tizen PCM dump file if open. NOTE(review): frees of name/driver and
 * the trailing pa_xfree(s) are on lines missing from this extraction. */
832 static void sink_free(pa_object *o) {
833 pa_sink *s = PA_SINK(o);
836 pa_assert_ctl_context();
837 pa_assert(pa_sink_refcnt(s) == 0);
838 pa_assert(!PA_SINK_IS_LINKED(s->state));
840 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
842 pa_sink_volume_change_flush(s);
844 if (s->monitor_source) {
845 pa_source_unref(s->monitor_source);
846 s->monitor_source = NULL;
849 pa_idxset_free(s->inputs, NULL);
850 pa_hashmap_free(s->thread_info.inputs);
852 if (s->silence.memblock)
853 pa_memblock_unref(s->silence.memblock);
859 pa_proplist_free(s->proplist);
862 pa_hashmap_free(s->ports);
864 #ifdef TIZEN_PCM_DUMP
865 /* close file for dump pcm */
866 if (s->pcm_dump_fp) {
867 fclose(s->pcm_dump_fp);
868 pa_log_info("%s closed", s->dump_path);
869 pa_xfree(s->dump_path);
870 s->pcm_dump_fp = NULL;
876 /* Called from main context, and not while the IO thread is active, please */
/* Attach the async message queue used to talk to the IO thread, and forward
 * it to the monitor source. NOTE(review): the 's->asyncmsgq = q;' assignment
 * is on a line missing from this extraction. */
877 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
878 pa_sink_assert_ref(s);
879 pa_assert_ctl_context();
883 if (s->monitor_source)
884 pa_source_set_asyncmsgq(s->monitor_source, q);
887 /* Called from main context, and not while the IO thread is active, please */
/* Change a restricted subset of sink flags (LATENCY, DYNAMIC_LATENCY) after
 * creation: applies 'value' under 'mask', logs each flag transition, posts a
 * change event + FLAGS_CHANGED hook, mirrors the change onto the monitor
 * source, and recurses into the origin sinks of any filter inputs. */
888 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
889 pa_sink_flags_t old_flags;
890 pa_sink_input *input;
893 pa_sink_assert_ref(s);
894 pa_assert_ctl_context();
896 /* For now, allow only a minimal set of flags to be changed. */
897 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
899 old_flags = s->flags;
900 s->flags = (s->flags & ~mask) | (value & mask);
/* No-op if nothing actually changed. */
902 if (s->flags == old_flags)
905 if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
906 pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
908 if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
909 pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
910 s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
912 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
913 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
/* Translate sink latency flags into the monitor source's equivalents. */
915 if (s->monitor_source)
916 pa_source_update_flags(s->monitor_source,
917 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
918 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
919 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
920 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
/* Propagate to filter sinks stacked on top of this one. */
922 PA_IDXSET_FOREACH(input, s->inputs, idx) {
923 if (input->origin_sink)
924 pa_sink_update_flags(input->origin_sink, mask, value);
928 /* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object the IO thread will drive, and forward it to the
 * monitor source so both poll on the same loop. */
929 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
930 pa_sink_assert_ref(s);
931 pa_sink_assert_io_context(s);
933 s->thread_info.rtpoll = p;
935 if (s->monitor_source)
936 pa_source_set_rtpoll(s->monitor_source, p);
939 /* Called from main context */
/* Re-evaluate RUNNING vs IDLE based on whether any inputs are connected.
 * Suspended sinks are left alone. Returns sink_set_state()'s result.
 * NOTE(review): the early 'return 0;' for the suspended case is on a line
 * missing from this extraction. */
940 int pa_sink_update_status(pa_sink*s) {
941 pa_sink_assert_ref(s);
942 pa_assert_ctl_context();
943 pa_assert(PA_SINK_IS_LINKED(s->state));
945 if (s->state == PA_SINK_SUSPENDED)
948 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
951 /* Called from any context - must be threadsafe */
/* Atomically record that the hardware mixer settings are (or are no longer)
 * out of sync and need to be re-applied when the mixer becomes accessible. */
952 void pa_sink_set_mixer_dirty(pa_sink *s, bool is_dirty) {
953 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
956 /* Called from main context */
957 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
958 pa_suspend_cause_t merged_cause;
960 pa_sink_assert_ref(s);
961 pa_assert_ctl_context();
962 pa_assert(PA_SINK_IS_LINKED(s->state));
963 pa_assert(cause != 0);
966 merged_cause = s->suspend_cause | cause;
968 merged_cause = s->suspend_cause & ~cause;
970 if (!(merged_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
971 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
972 it'll be handled just fine. */
973 pa_sink_set_mixer_dirty(s, false);
974 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
975 if (s->active_port && s->set_port) {
976 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
977 struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
978 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
981 s->set_port(s, s->active_port);
992 return sink_set_state(s, PA_SINK_SUSPENDED, merged_cause);
994 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
997 /* Called from main context */
/* Begin moving every input away from this sink: each input that successfully
 * starts its move (keeping a ref) is pushed onto queue 'q' (allocated here if
 * NULL — allocation line missing from this extraction); inputs that fail to
 * start are unreffed and left behind. Returns the queue for
 * pa_sink_move_all_finish()/_fail(). */
998 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
999 pa_sink_input *i, *n;
1002 pa_sink_assert_ref(s);
1003 pa_assert_ctl_context();
1004 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the successor before start_move() can mutate s->inputs. */
1009 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
1010 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
1012 pa_sink_input_ref(i);
1014 if (pa_sink_input_start_move(i) >= 0)
1015 pa_queue_push(q, i);
1017 pa_sink_input_unref(i);
1023 /* Called from main context */
/* Complete a bulk move started by pa_sink_move_all_start(): pop each queued
 * input, finish its move onto sink 's' (falling back to fail_move on error),
 * drop the ref taken at start, and free the queue. Inputs that became
 * unlinked in the meantime are skipped (their unref path is on a line
 * missing from this extraction). */
1024 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
1027 pa_sink_assert_ref(s);
1028 pa_assert_ctl_context();
1029 pa_assert(PA_SINK_IS_LINKED(s->state));
1032 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1033 if (PA_SINK_INPUT_IS_LINKED(i->state)) {
1034 if (pa_sink_input_finish_move(i, s, save) < 0)
1035 pa_sink_input_fail_move(i);
1038 pa_sink_input_unref(i);
1041 pa_queue_free(q, NULL);
1044 /* Called from main context */
/* Abort a bulk move: notify each queued input that its move failed, drop the
 * ref taken at start, and free the queue. */
1045 void pa_sink_move_all_fail(pa_queue *q) {
1048 pa_assert_ctl_context();
1051 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1052 pa_sink_input_fail_move(i);
1053 pa_sink_input_unref(i);
1056 pa_queue_free(q, NULL);
1059 /* Called from IO thread context */
/* Inspect every attached input for underruns, recursing through filter-sink
 * trees (converting byte counts between each sink's sample spec). 'result'
 * accumulates the longest underrun seen; inputs whose underrun covers the
 * whole playback buffer are told to process it. Returns left_to_play minus
 * the longest underrun — i.e. the bytes safely ahead in the buffer.
 * NOTE(review): declarations of 'result'/'state', the 'uf == 0' branch, the
 * 'result = uf' assignment and the 'if (result > 0)' guard around the log
 * are on lines missing from this extraction. */
1060 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
1065 pa_sink_assert_ref(s);
1066 pa_sink_assert_io_context(s);
1068 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1069 size_t uf = i->thread_info.underrun_for_sink;
1071 /* Propagate down the filter tree */
1072 if (i->origin_sink) {
1073 size_t filter_result, left_to_play_origin;
1075 /* The recursive call works in the origin sink domain ... */
1076 left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
1078 /* .. and returns the time to sleep before waking up. We need the
1079 * underrun duration for comparisons, so we undo the subtraction on
1080 * the return value... */
1081 filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
1083 /* ... and convert it back to the master sink domain */
1084 filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
1086 /* Remember the longest underrun so far */
1087 if (filter_result > result)
1088 result = filter_result;
1092 /* No underrun here, move on */
1094 } else if (uf >= left_to_play) {
1095 /* The sink has possibly consumed all the data the sink input provided */
1096 pa_sink_input_process_underrun(i);
1097 } else if (uf > result) {
1098 /* Remember the longest underrun so far */
1104 pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
1105 (long) result, (long) left_to_play - result);
1106 return left_to_play - result;
1109 /* Called from IO thread context */
/* Executes a rewind of nbytes on the sink: rewinds the deferred-volume change
 * queue, every attached sink input, and the monitor source, so that already
 * rendered but not yet played audio can be regenerated. */
1110 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1114 pa_sink_assert_ref(s);
1115 pa_sink_assert_io_context(s);
1116 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1118 /* If nobody requested this and this is actually no real rewind
1119 * then we can short cut this. Please note that this means that
1120 * not all rewind requests triggered upstream will always be
1121 * translated in actual requests! */
1122 if (!s->thread_info.rewind_requested && nbytes <= 0)
1125 s->thread_info.rewind_nbytes = 0;
1126 s->thread_info.rewind_requested = false;
1129 pa_log_debug("Processing rewind...");
/* Deferred (HW-synchronized) volume changes must be rolled back too */
1130 if (s->flags & PA_SINK_DEFERRED_VOLUME)
1131 pa_sink_volume_change_rewind(s, nbytes);
1132 #ifdef TIZEN_PCM_DUMP
/* Keep the PCM dump file in sync with the rewound playback position */
1135 fseeko(s->pcm_dump_fp, (off_t)nbytes * (-1), SEEK_CUR);
1139 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1140 pa_sink_input_assert_ref(i);
1141 pa_sink_input_process_rewind(i, nbytes);
1145 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1146 pa_source_process_rewind(s->monitor_source, nbytes);
1150 /* Called from IO thread context */
/* Peeks up to maxinfo chunks from the sink's inputs into the info[] array for
 * mixing. Silent chunks are skipped (their memblock is dropped). On return,
 * *length is clamped to the shortest non-silent chunk collected, so all mix
 * entries cover the same span. Each stored entry holds a reference to its
 * sink input (info->userdata) and its memblock; inputs_drop() releases them. */
1151 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1155 size_t mixlength = *length;
1157 pa_sink_assert_ref(s);
1158 pa_sink_assert_io_context(s);
1161 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1162 pa_sink_input_assert_ref(i);
1164 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
1166 if (mixlength == 0 || info->chunk.length < mixlength)
1167 mixlength = info->chunk.length;
1169 if (pa_memblock_is_silence(info->chunk.memblock)) {
1170 pa_memblock_unref(info->chunk.memblock);
/* Keep the input alive while its chunk sits in the mix array */
1174 info->userdata = pa_sink_input_ref(i);
1176 pa_assert(info->chunk.memblock);
1177 pa_assert(info->chunk.length > 0);
1185 *length = mixlength;
1190 /* Called from IO thread context */
/* After a render: advances every input by result->length bytes, forwards
 * per-input audio to any direct outputs on the monitor source, releases all
 * references held in the info[] array, and finally posts the mixed result to
 * the monitor source. */
1191 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1195 unsigned n_unreffed = 0;
1197 pa_sink_assert_ref(s);
1198 pa_sink_assert_io_context(s);
1200 pa_assert(result->memblock);
1201 pa_assert(result->length > 0);
1203 /* We optimize for the case where the order of the inputs has not changed */
1205 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1207 pa_mix_info* m = NULL;
1209 pa_sink_input_assert_ref(i);
1211 /* Let's try to find the matching entry info the pa_mix_info array */
1212 for (j = 0; j < n; j ++) {
1214 if (info[p].userdata == i) {
1224 /* Drop read data */
1225 pa_sink_input_drop(i, result->length);
1227 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
1229 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1230 void *ostate = NULL;
1231 pa_source_output *o;
/* If this input contributed a chunk, replay that chunk (with the input's
 * volume applied) to its direct outputs; the else path (elided here)
 * presumably falls back to silence — confirm against the full file. */
1234 if (m && m->chunk.memblock) {
1236 pa_memblock_ref(c.memblock);
1237 pa_assert(result->length <= c.length);
1238 c.length = result->length;
1240 pa_memchunk_make_writable(&c, 0);
1241 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
1244 pa_memblock_ref(c.memblock);
1245 pa_assert(result->length <= c.length);
1246 c.length = result->length;
1249 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1250 pa_source_output_assert_ref(o);
1251 pa_assert(o->direct_on_input == i);
1252 pa_source_post_direct(s->monitor_source, o, &c);
1255 pa_memblock_unref(c.memblock);
/* Release this entry's chunk and input reference now that it is consumed */
1260 if (m->chunk.memblock) {
1261 pa_memblock_unref(m->chunk.memblock);
1262 pa_memchunk_reset(&m->chunk);
1265 pa_sink_input_unref(m->userdata);
1272 /* Now drop references to entries that are included in the
1273 * pa_mix_info array but don't exist anymore */
1275 if (n_unreffed < n) {
1276 for (; n > 0; info++, n--) {
1278 pa_sink_input_unref(info->userdata);
1279 if (info->chunk.memblock)
1280 pa_memblock_unref(info->chunk.memblock);
1284 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1285 pa_source_post(s->monitor_source, result);
1288 /* Called from IO thread context */
/* Renders up to 'length' bytes of mixed audio into *result, allocating the
 * output memblock itself (zero-copy when possible):
 *   - suspended sink: returns a reference to the cached silence chunk
 *   - 0 inputs:       returns silence
 *   - 1 input:        returns the input's chunk directly, applying soft
 *                     volume/mute only if needed (copy-on-write)
 *   - >1 inputs:      mixes into a fresh memblock via pa_mix()
 * result->length may be shorter than requested. */
1289 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1290 pa_mix_info info[MAX_MIX_CHANNELS];
1292 size_t block_size_max;
1294 pa_sink_assert_ref(s);
1295 pa_sink_assert_io_context(s);
1296 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1297 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1300 pa_assert(!s->thread_info.rewind_requested);
1301 pa_assert(s->thread_info.rewind_nbytes == 0);
1303 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1304 result->memblock = pa_memblock_ref(s->silence.memblock);
1305 result->index = s->silence.index;
1306 result->length = PA_MIN(s->silence.length, length);
/* length == 0 means "pick a default": one frame-aligned mix buffer */
1313 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1315 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1316 if (length > block_size_max)
1317 length = pa_frame_align(block_size_max, &s->sample_spec);
1319 pa_assert(length > 0);
1321 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1325 *result = s->silence;
1326 pa_memblock_ref(result->memblock);
1328 if (result->length > length)
1329 result->length = length;
1331 } else if (n == 1) {
1334 *result = info[0].chunk;
1335 pa_memblock_ref(result->memblock);
1337 if (result->length > length)
1338 result->length = length;
/* Combined volume = sink soft volume x input volume */
1340 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1342 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1343 pa_memblock_unref(result->memblock);
1344 pa_silence_memchunk_get(&s->core->silence_cache,
1349 } else if (!pa_cvolume_is_norm(&volume)) {
1350 pa_memchunk_make_writable(result, 0);
1351 pa_volume_memchunk(result, &s->sample_spec, &volume);
1355 result->memblock = pa_memblock_new(s->core->mempool, length);
1357 ptr = pa_memblock_acquire(result->memblock);
1358 result->length = pa_mix(info, n,
1361 &s->thread_info.soft_volume,
1362 s->thread_info.soft_muted);
1363 pa_memblock_release(result->memblock);
1368 inputs_drop(s, info, n, result);
1370 #ifdef TIZEN_PCM_DUMP
1371 pa_sink_write_pcm_dump(s, result);
1376 /* Called from IO thread context */
/* Like pa_sink_render(), but renders into a caller-supplied memchunk
 * (target->memblock already allocated). May shorten target->length if less
 * data was available; pa_sink_render_into_full() loops to fill completely. */
1377 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1378 pa_mix_info info[MAX_MIX_CHANNELS];
1380 size_t length, block_size_max;
1382 pa_sink_assert_ref(s);
1383 pa_sink_assert_io_context(s);
1384 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1386 pa_assert(target->memblock);
1387 pa_assert(target->length > 0);
1388 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1390 pa_assert(!s->thread_info.rewind_requested);
1391 pa_assert(s->thread_info.rewind_nbytes == 0);
1393 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1394 pa_silence_memchunk(target, &s->sample_spec);
1400 length = target->length;
1401 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1402 if (length > block_size_max)
1403 length = pa_frame_align(block_size_max, &s->sample_spec);
1405 pa_assert(length > 0);
1407 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* No inputs: fill the target with silence */
1410 if (target->length > length)
1411 target->length = length;
1413 pa_silence_memchunk(target, &s->sample_spec);
1414 } else if (n == 1) {
1417 if (target->length > length)
1418 target->length = length;
1420 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1422 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1423 pa_silence_memchunk(target, &s->sample_spec);
/* Single live input: copy its chunk into target, scaling first if needed */
1427 vchunk = info[0].chunk;
1428 pa_memblock_ref(vchunk.memblock);
1430 if (vchunk.length > length)
1431 vchunk.length = length;
1433 if (!pa_cvolume_is_norm(&volume)) {
1434 pa_memchunk_make_writable(&vchunk, 0);
1435 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1438 pa_memchunk_memcpy(target, &vchunk);
1439 pa_memblock_unref(vchunk.memblock);
/* Multiple inputs: mix straight into the caller's buffer */
1445 ptr = pa_memblock_acquire(target->memblock);
1447 target->length = pa_mix(info, n,
1448 (uint8_t*) ptr + target->index, length,
1450 &s->thread_info.soft_volume,
1451 s->thread_info.soft_muted);
1453 pa_memblock_release(target->memblock);
1456 inputs_drop(s, info, n, target);
1458 #ifdef TIZEN_PCM_DUMP
1459 pa_sink_write_pcm_dump(s, target);
1464 /* Called from IO thread context */
/* Fills the ENTIRE caller-supplied target chunk: repeatedly calls
 * pa_sink_render_into() on the remaining tail until target->length bytes are
 * written (the loop itself is elided from this excerpt). Suspended sinks get
 * pure silence. */
1465 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1469 pa_sink_assert_ref(s);
1470 pa_sink_assert_io_context(s);
1471 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1473 pa_assert(target->memblock);
1474 pa_assert(target->length > 0);
1475 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1477 pa_assert(!s->thread_info.rewind_requested);
1478 pa_assert(s->thread_info.rewind_nbytes == 0);
1480 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1481 pa_silence_memchunk(target, &s->sample_spec);
1494 pa_sink_render_into(s, &chunk);
1503 /* Called from IO thread context */
/* Renders exactly 'length' bytes into *result: first a normal render, then —
 * if that came up short — makes the block writable and fills the remainder
 * via pa_sink_render_into_full(). */
1504 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1505 pa_sink_assert_ref(s);
1506 pa_sink_assert_io_context(s);
1507 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1508 pa_assert(length > 0);
1509 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1512 pa_assert(!s->thread_info.rewind_requested);
1513 pa_assert(s->thread_info.rewind_nbytes == 0);
1517 pa_sink_render(s, length, result);
1519 if (result->length < length) {
/* Ensure the block is private and large enough before appending */
1522 pa_memchunk_make_writable(result, length);
1524 chunk.memblock = result->memblock;
1525 chunk.index = result->index + result->length;
1526 chunk.length = length - result->length;
1528 pa_sink_render_into_full(s, &chunk);
1530 result->length = length;
1536 /* Called from main thread */
/* Tries to reconfigure the sink's sample spec (currently only the rate) to
 * better match 'spec', and to switch passthrough mode. Bails out early when
 * the sink can't reconfigure, is running, or its monitor is running. The
 * sink is suspended around the actual reconfiguration. Return value: <0 on
 * failure, >=0 on success (exact returns elided from this excerpt). */
1537 int pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
1539 pa_sample_spec desired_spec;
1540 uint32_t default_rate = s->default_sample_rate;
1541 uint32_t alternate_rate = s->alternate_sample_rate;
1544 bool default_rate_is_usable = false;
1545 bool alternate_rate_is_usable = false;
1546 bool avoid_resampling = s->core->avoid_resampling;
1548 /* We currently only try to reconfigure the sample rate */
1550 if (pa_sample_spec_equal(spec, &s->sample_spec))
1553 if (!s->reconfigure)
1556 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1557 pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
/* Never change the rate under a running stream */
1561 if (PA_SINK_IS_RUNNING(s->state)) {
1562 pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
1563 s->sample_spec.rate);
1567 if (s->monitor_source) {
1568 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1569 pa_log_info("Cannot update rate, monitor source is RUNNING");
1574 if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
1577 desired_spec = s->sample_spec;
/* Rate selection policy, in priority order (first branch elided here;
 * presumably the passthrough case — confirm against the full file): */
1580 /* We have to try to use the sink input rate */
1581 desired_spec.rate = spec->rate;
1583 } else if (avoid_resampling && (spec->rate >= default_rate || spec->rate >= alternate_rate)) {
1584 /* We just try to set the sink input's sample rate if it's not too low */
1585 desired_spec.rate = spec->rate;
1587 } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
1588 /* We can directly try to use this rate */
1589 desired_spec.rate = spec->rate;
1592 /* See if we can pick a rate that results in less resampling effort */
/* 11025 / 4000 divisibility identifies the 44.1 kHz vs 48 kHz rate families */
1593 if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
1594 default_rate_is_usable = true;
1595 if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
1596 default_rate_is_usable = true;
1597 if (alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
1598 alternate_rate_is_usable = true;
1599 if (alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
1600 alternate_rate_is_usable = true;
1602 if (alternate_rate_is_usable && !default_rate_is_usable)
1603 desired_spec.rate = alternate_rate;
1605 desired_spec.rate = default_rate;
1608 if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_sink_is_passthrough(s))
1611 if (!passthrough && pa_sink_used_by(s) > 0)
1614 pa_log_debug("Suspending sink %s due to changing format.", s->name);
1615 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1617 if (s->reconfigure(s, &desired_spec, passthrough) >= 0) {
1618 /* update monitor source as well */
1619 if (s->monitor_source && !passthrough)
1620 pa_source_reconfigure(s->monitor_source, &desired_spec, false);
1621 pa_log_info("Changed format successfully");
/* Corked inputs must refresh their resamplers for the new rate */
1623 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1624 if (i->state == PA_SINK_INPUT_CORKED)
1625 pa_sink_input_update_rate(i);
1631 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1636 /* Called from main thread */
/* Queries the sink's current latency (in the sound card's time domain) via a
 * synchronous message to the IO thread, then applies the port latency offset.
 * Returns 0 for suspended sinks or sinks without PA_SINK_LATENCY (the early
 * returns are elided from this excerpt — confirm against the full file). */
1637 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1640 pa_sink_assert_ref(s);
1641 pa_assert_ctl_context();
1642 pa_assert(PA_SINK_IS_LINKED(s->state));
1644 /* The returned value is supposed to be in the time domain of the sound card! */
1646 if (s->state == PA_SINK_SUSPENDED)
1649 if (!(s->flags & PA_SINK_LATENCY))
1652 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1654 /* the return value is unsigned, so check that the offset can be added to usec without
/* ... underflowing (offset may be negative) */
1656 if (-s->port_latency_offset <= usec)
1657 usec += s->port_latency_offset;
1661 return (pa_usec_t)usec;
1664 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): calls process_msg() directly
 * instead of messaging across threads. With allow_negative=false the result
 * is clamped so that a negative port offset cannot yield a negative value. */
1665 int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
1669 pa_sink_assert_ref(s);
1670 pa_sink_assert_io_context(s);
1671 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1673 /* The returned value is supposed to be in the time domain of the sound card! */
1675 if (s->thread_info.state == PA_SINK_SUSPENDED)
1678 if (!(s->flags & PA_SINK_LATENCY))
1681 o = PA_MSGOBJECT(s);
1683 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1685 o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1687 /* If allow_negative is false, the call should only return positive values, */
1688 usec += s->thread_info.port_latency_offset;
1689 if (!allow_negative && usec < 0)
1695 /* Called from the main thread (and also from the IO thread while the main
1696 * thread is waiting).
1698 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1699 * set. Instead, flat volume mode is detected by checking whether the root sink
1700 * has the flag set. */
/* Returns true iff flat volume is in effect for s (checked on its root). */
1701 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1702 pa_sink_assert_ref(s);
1704 s = pa_sink_get_master(s);
1707 return (s->flags & PA_SINK_FLAT_VOLUME);
1712 /* Called from the main thread (and also from the IO thread while the main
1713 * thread is waiting). */
/* Walks up the filter-sink chain through input_to_master links and returns
 * the root sink. A filter whose input_to_master is unset yields NULL-ish
 * behavior handled by the (elided) early-out branch. */
1714 pa_sink *pa_sink_get_master(pa_sink *s) {
1715 pa_sink_assert_ref(s);
1717 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1718 if (PA_UNLIKELY(!s->input_to_master))
1721 s = s->input_to_master->sink;
1727 /* Called from main context */
/* A sink is a filter sink iff it is attached to a master via a sink input. */
1728 bool pa_sink_is_filter(pa_sink *s) {
1729 pa_sink_assert_ref(s);
1731 return (s->input_to_master != NULL);
1734 /* Called from main context */
/* Returns true iff the sink currently carries a passthrough stream. Since at
 * most one passthrough input may be connected, only the single-input case
 * needs checking. */
1735 bool pa_sink_is_passthrough(pa_sink *s) {
1736 pa_sink_input *alt_i;
1739 pa_sink_assert_ref(s);
1741 /* one and only one PASSTHROUGH input can possibly be connected */
1742 if (pa_idxset_size(s->inputs) == 1) {
1743 alt_i = pa_idxset_first(s->inputs, &idx);
1745 if (pa_sink_input_is_passthrough(alt_i))
1752 /* Called from main context */
/* Puts the PA core objects into passthrough mode: suspends the monitor
 * source, saves the current volume, and forces the volume to (at most) 0 dB
 * so the compressed bitstream is not scaled. Undone by
 * pa_sink_leave_passthrough(). */
1753 void pa_sink_enter_passthrough(pa_sink *s) {
1756 /* The sink implementation is reconfigured for passthrough in
1757 * pa_sink_reconfigure(). This function sets the PA core objects to
1758 * passthrough mode. */
1760 /* disable the monitor in passthrough mode */
1761 if (s->monitor_source) {
1762 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1763 pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1766 /* set the volume to NORM */
/* Remember the user's volume so leave_passthrough() can restore it */
1767 s->saved_volume = *pa_sink_get_volume(s, true);
1768 s->saved_save_volume = s->save_volume;
1770 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1771 pa_sink_set_volume(s, &volume, true, false);
1773 pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
1776 /* Called from main context */
/* Reverses pa_sink_enter_passthrough(): resumes the monitor source and
 * restores the volume that was saved when passthrough mode was entered. */
1777 void pa_sink_leave_passthrough(pa_sink *s) {
1778 /* Unsuspend monitor */
1779 if (s->monitor_source) {
1780 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1781 pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1784 /* Restore sink volume to what it was before we entered passthrough mode */
1785 pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
1787 pa_cvolume_init(&s->saved_volume);
1788 s->saved_save_volume = false;
1792 /* Called from main context. */
/* Recomputes one input's reference ratio under flat volume:
 * i->reference_ratio = i->volume / i->sink->reference_volume (per channel,
 * with the sink volume remapped to the input's channel map). Channels with a
 * muted sink volume keep their previous ratio. */
1793 static void compute_reference_ratio(pa_sink_input *i) {
1795 pa_cvolume remapped;
1799 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1802 * Calculates the reference ratio from the sink's reference
1803 * volume. This basically calculates:
1805 * i->reference_ratio = i->volume / i->sink->reference_volume
1808 remapped = i->sink->reference_volume;
1809 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1811 ratio = i->reference_ratio;
1813 for (c = 0; c < i->sample_spec.channels; c++) {
1815 /* We don't update when the sink volume is 0 anyway */
1816 if (remapped.values[c] <= PA_VOLUME_MUTED)
1819 /* Don't update the reference ratio unless necessary */
/* If ratio * sink volume still reproduces the input volume exactly,
 * keep the old ratio to avoid rounding drift */
1820 if (pa_sw_volume_multiply(
1822 remapped.values[c]) == i->volume.values[c])
1825 ratio.values[c] = pa_sw_volume_divide(
1826 i->volume.values[c],
1827 remapped.values[c]);
1830 pa_sink_input_set_reference_ratio(i, &ratio);
1833 /* Called from main context. Only called for the root sink in volume sharing
1834 * cases, except for internal recursive calls. */
/* Recomputes the reference ratio of every input of s, recursing into linked
 * volume-sharing filter sinks so the whole tree stays consistent. */
1835 static void compute_reference_ratios(pa_sink *s) {
1839 pa_sink_assert_ref(s);
1840 pa_assert_ctl_context();
1841 pa_assert(PA_SINK_IS_LINKED(s->state));
1842 pa_assert(pa_sink_flat_volume_enabled(s));
1844 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1845 compute_reference_ratio(i);
1847 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1848 && PA_SINK_IS_LINKED(i->origin_sink->state))
1849 compute_reference_ratios(i->origin_sink);
1853 /* Called from main context. Only called for the root sink in volume sharing
1854 * cases, except for internal recursive calls. */
/* Recomputes, for every input of s:
 *   i->real_ratio  := i->volume / s->real_volume
 *   i->soft_volume := i->real_ratio * i->volume_factor
 * Volume-sharing filter inputs are special-cased (ratio forced to 0 dB) and
 * the recursion continues into their origin sinks. Thread-side copies of
 * soft_volume are NOT updated here — that's the caller's job. */
1855 static void compute_real_ratios(pa_sink *s) {
1859 pa_sink_assert_ref(s);
1860 pa_assert_ctl_context();
1861 pa_assert(PA_SINK_IS_LINKED(s->state));
1862 pa_assert(pa_sink_flat_volume_enabled(s));
1864 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1866 pa_cvolume remapped;
1868 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1869 /* The origin sink uses volume sharing, so this input's real ratio
1870 * is handled as a special case - the real ratio must be 0 dB, and
1871 * as a result i->soft_volume must equal i->volume_factor. */
1872 pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1873 i->soft_volume = i->volume_factor;
1875 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1876 compute_real_ratios(i->origin_sink);
1882 * This basically calculates:
1884 * i->real_ratio := i->volume / s->real_volume
1885 * i->soft_volume := i->real_ratio * i->volume_factor
1888 remapped = s->real_volume;
1889 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1891 i->real_ratio.channels = i->sample_spec.channels;
1892 i->soft_volume.channels = i->sample_spec.channels;
1894 for (c = 0; c < i->sample_spec.channels; c++) {
1896 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1897 /* We leave i->real_ratio untouched */
1898 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1902 /* Don't lose accuracy unless necessary */
/* Only re-divide if the cached ratio no longer reproduces i->volume */
1903 if (pa_sw_volume_multiply(
1904 i->real_ratio.values[c],
1905 remapped.values[c]) != i->volume.values[c])
1907 i->real_ratio.values[c] = pa_sw_volume_divide(
1908 i->volume.values[c],
1909 remapped.values[c]);
1911 i->soft_volume.values[c] = pa_sw_volume_multiply(
1912 i->real_ratio.values[c],
1913 i->volume_factor.values[c]);
1916 /* We don't copy the soft_volume to the thread_info data
1917 * here. That must be done by the caller */
/* Remaps *v from channel map 'from' to 'to' while minimizing surprises when
 * translating a sink-input volume to a sink volume (see comment below).
 * Returns v (modified in place). */
1921 static pa_cvolume *cvolume_remap_minimal_impact(
1923 const pa_cvolume *template,
1924 const pa_channel_map *from,
1925 const pa_channel_map *to) {
1930 pa_assert(template);
1933 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1934 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1936 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1937 * mapping from sink input to sink volumes:
1939 * If template is a possible remapping from v it is used instead
1940 * of remapping anew.
1942 * If the channel maps don't match we set an all-channel volume on
1943 * the sink to ensure that changing a volume on one stream has no
1944 * effect that cannot be compensated for in another stream that
1945 * does not have the same channel map as the sink. */
1947 if (pa_channel_map_equal(from, to))
/* Template round-trips back to v? Then adopt the template verbatim. */
1951 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
/* Fallback: flatten to a uniform volume (the per-channel maximum) */
1956 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1960 /* Called from main thread. Only called for the root sink in volume sharing
1961 * cases, except for internal recursive calls. */
/* Merges (per-channel max) the volumes of every input of s — including those
 * of volume-sharing filter sinks, visited recursively — into *max_volume,
 * expressed in 'channel_map'. Used to derive the root sink's real volume in
 * flat-volume mode. */
1962 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1966 pa_sink_assert_ref(s);
1967 pa_assert(max_volume);
1968 pa_assert(channel_map);
1969 pa_assert(pa_sink_flat_volume_enabled(s));
1971 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1972 pa_cvolume remapped;
1974 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1975 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1976 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1978 /* Ignore this input. The origin sink uses volume sharing, so this
1979 * input's volume will be set to be equal to the root sink's real
1980 * volume. Obviously this input's current volume must not then
1981 * affect what the root sink's real volume will be. */
1985 remapped = i->volume;
1986 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1987 pa_cvolume_merge(max_volume, max_volume, &remapped);
1991 /* Called from main thread. Only called for the root sink in volume sharing
1992 * cases, except for internal recursive calls. */
/* True if s (or any volume-sharing filter sink hanging off it) has at least
 * one ordinary input; pure filter-connection inputs don't count by themselves. */
1993 static bool has_inputs(pa_sink *s) {
1997 pa_sink_assert_ref(s);
1999 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2000 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
2007 /* Called from main thread. Only called for the root sink in volume sharing
2008 * cases, except for internal recursive calls. */
/* Sets s->real_volume to *new_volume (remapped from 'channel_map' into the
 * sink's own map) and propagates it down to every volume-sharing filter sink:
 * their connecting inputs follow the root's real volume exactly, and their
 * reference ratios are recomputed when flat volume is active. */
2009 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
2013 pa_sink_assert_ref(s);
2014 pa_assert(new_volume);
2015 pa_assert(channel_map);
2017 s->real_volume = *new_volume;
2018 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
2020 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2021 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2022 if (pa_sink_flat_volume_enabled(s)) {
2023 pa_cvolume new_input_volume;
2025 /* Follow the root sink's real volume. */
2026 new_input_volume = *new_volume;
2027 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
2028 pa_sink_input_set_volume_direct(i, &new_input_volume);
2029 compute_reference_ratio(i);
2032 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2033 update_real_volume(i->origin_sink, new_volume, channel_map);
2038 /* Called from main thread. Only called for the root sink in shared volume
/* Derives s->real_volume in flat-volume mode as the per-channel maximum of
 * all connected stream volumes, then refreshes all real ratios/soft volumes.
 * With no inputs, the reference volume is used unchanged. */
2040 static void compute_real_volume(pa_sink *s) {
2041 pa_sink_assert_ref(s);
2042 pa_assert_ctl_context();
2043 pa_assert(PA_SINK_IS_LINKED(s->state));
2044 pa_assert(pa_sink_flat_volume_enabled(s));
2045 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2047 /* This determines the maximum volume of all streams and sets
2048 * s->real_volume accordingly. */
2050 if (!has_inputs(s)) {
2051 /* In the special case that we have no sink inputs we leave the
2052 * volume unmodified. */
2053 update_real_volume(s, &s->reference_volume, &s->channel_map);
/* Start from silence, then raise toward the loudest input */
2057 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
2059 /* First let's determine the new maximum volume of all inputs
2060 * connected to this sink */
2061 get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
2062 update_real_volume(s, &s->real_volume, &s->channel_map);
2064 /* Then, let's update the real ratios/soft volumes of all inputs
2065 * connected to this sink */
2066 compute_real_ratios(s);
2069 /* Called from main thread. Only called for the root sink in shared volume
2070 * cases, except for internal recursive calls. */
/* After a sink-level reference volume change, pushes the change down to the
 * streams: each input's volume becomes reference_volume * reference_ratio.
 * Volume-sharing filter inputs are skipped here (handled later in
 * update_real_volume()); their origin sinks are recursed into instead. */
2071 static void propagate_reference_volume(pa_sink *s) {
2075 pa_sink_assert_ref(s);
2076 pa_assert_ctl_context();
2077 pa_assert(PA_SINK_IS_LINKED(s->state));
2078 pa_assert(pa_sink_flat_volume_enabled(s));
2080 /* This is called whenever the sink volume changes that is not
2081 * caused by a sink input volume change. We need to fix up the
2082 * sink input volumes accordingly */
2084 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2085 pa_cvolume new_volume;
2087 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2088 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2089 propagate_reference_volume(i->origin_sink);
2091 /* Since the origin sink uses volume sharing, this input's volume
2092 * needs to be updated to match the root sink's real volume, but
2093 * that will be done later in update_real_volume(). */
2097 /* This basically calculates:
2099 * i->volume := s->reference_volume * i->reference_ratio */
2101 new_volume = s->reference_volume;
2102 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2103 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2104 pa_sink_input_set_volume_direct(i, &new_volume);
2108 /* Called from main thread. Only called for the root sink in volume sharing
2109 * cases, except for internal recursive calls. The return value indicates
2110 * whether any reference volume actually changed. */
/* Sets s->reference_volume from *v (remapped from 'channel_map'), updates the
 * save flag, and recurses into volume-sharing filter sinks so the whole tree
 * shares one reference volume. */
2111 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
2113 bool reference_volume_changed;
2117 pa_sink_assert_ref(s);
2118 pa_assert(PA_SINK_IS_LINKED(s->state));
2120 pa_assert(channel_map);
2121 pa_assert(pa_cvolume_valid(v));
2124 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
2126 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
2127 pa_sink_set_reference_volume_direct(s, &volume);
/* Keep an existing save flag alive if the volume didn't change */
2129 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
2131 if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2132 /* If the root sink's volume doesn't change, then there can't be any
2133 * changes in the other sinks in the sink tree either.
2135 * It's probably theoretically possible that even if the root sink's
2136 * volume changes slightly, some filter sink doesn't change its volume
2137 * due to rounding errors. If that happens, we still want to propagate
2138 * the changed root sink volume to the sinks connected to the
2139 * intermediate sink that didn't change its volume. This theoretical
2140 * possibility is the reason why we have that !(s->flags &
2141 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2142 * notice even if we returned here false always if
2143 * reference_volume_changed is false. */
2146 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2147 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2148 && PA_SINK_IS_LINKED(i->origin_sink->state))
2149 update_reference_volume(i->origin_sink, v, channel_map, false);
2155 /* Called from main thread */
/* Sets the sink's volume. With volume != NULL the given volume becomes the
 * new reference volume (mono volumes are accepted on any sink and scaled).
 * With volume == NULL (flat volume only) the sink's reference/real volumes
 * are re-synchronized from the current stream volumes. All changes are
 * applied on the root sink and propagated through the sharing tree, then
 * pushed to the IO thread. Refused while a passthrough stream is connected,
 * unless the call resets to 0 dB. */
2156 void pa_sink_set_volume(
2158 const pa_cvolume *volume,
2162 pa_cvolume new_reference_volume;
2165 pa_sink_assert_ref(s);
2166 pa_assert_ctl_context();
2167 pa_assert(PA_SINK_IS_LINKED(s->state));
2168 pa_assert(!volume || pa_cvolume_valid(volume));
2169 pa_assert(volume || pa_sink_flat_volume_enabled(s));
2170 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2172 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2173 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2174 if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2175 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
2179 /* In case of volume sharing, the volume is set for the root sink first,
2180 * from which it's then propagated to the sharing sinks. */
2181 root_sink = pa_sink_get_master(s);
2183 if (PA_UNLIKELY(!root_sink))
2186 /* As a special exception we accept mono volumes on all sinks --
2187 * even on those with more complex channel maps */
2190 if (pa_cvolume_compatible(volume, &s->sample_spec))
2191 new_reference_volume = *volume;
/* Mono volume: keep the current balance, scale to the requested level */
2193 new_reference_volume = s->reference_volume;
2194 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2197 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2199 if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2200 if (pa_sink_flat_volume_enabled(root_sink)) {
2201 /* OK, propagate this volume change back to the inputs */
2202 propagate_reference_volume(root_sink);
2204 /* And now recalculate the real volume */
2205 compute_real_volume(root_sink);
/* Non-flat: real volume simply tracks the reference volume */
2207 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2211 /* If volume is NULL we synchronize the sink's real and
2212 * reference volumes with the stream volumes. */
2214 pa_assert(pa_sink_flat_volume_enabled(root_sink));
2216 /* Ok, let's determine the new real volume */
2217 compute_real_volume(root_sink);
2219 /* Let's 'push' the reference volume if necessary */
2220 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2221 /* If the sink and its root don't have the same number of channels, we need to remap */
2222 if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2223 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2224 update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2226 /* Now that the reference volume is updated, we can update the streams'
2227 * reference ratios. */
2228 compute_reference_ratios(root_sink);
2231 if (root_sink->set_volume) {
2232 /* If we have a function set_volume(), then we do not apply a
2233 * soft volume by default. However, set_volume() is free to
2234 * apply one to root_sink->soft_volume */
2236 pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
2237 if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2238 root_sink->set_volume(root_sink);
2241 /* If we have no function set_volume(), then the soft volume
2242 * becomes the real volume */
2243 root_sink->soft_volume = root_sink->real_volume;
2245 /* This tells the sink that soft volume and/or real volume changed */
2247 pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2250 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2251 * Only to be called by sink implementor */
/* Sets the sink's software (post-mix) volume; NULL resets it to 0 dB
 * (the NULL branch is partially elided from this excerpt). For non-deferred
 * volumes the change is pushed synchronously to the IO thread. */
2252 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2254 pa_sink_assert_ref(s);
2255 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* Context depends on the deferred-volume flag: IO thread vs. main thread */
2257 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2258 pa_sink_assert_io_context(s);
2260 pa_assert_ctl_context();
2263 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2265 s->soft_volume = *volume;
2267 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2268 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
/* Not yet linked (or deferred): update the thread-side copy directly */
2270 s->thread_info.soft_volume = s->soft_volume;
2273 /* Called from the main thread. Only called for the root sink in volume sharing
2274 * cases, except for internal recursive calls. */
/* Reacts to an externally-caused hardware volume change: adopts the new real
 * volume as the reference volume and rebuilds stream volumes from the (fixed)
 * real ratios. Recurses into origin sinks of volume-sharing filter inputs. */
2275 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2279 pa_sink_assert_ref(s);
2280 pa_assert(old_real_volume);
2281 pa_assert_ctl_context();
2282 pa_assert(PA_SINK_IS_LINKED(s->state));
2284 /* This is called when the hardware's real volume changes due to
2285 * some external event. We copy the real volume into our
2286 * reference volume and then rebuild the stream volumes based on
2287 * i->real_ratio which should stay fixed. */
/* A volume-sharing sink has no real volume of its own, so skip this part. */
2289 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* No change: nothing to propagate (early return is elided in this extract). */
2290 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2293 /* 1. Make the real volume the reference volume */
2294 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2297 if (pa_sink_flat_volume_enabled(s)) {
2299 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2300 pa_cvolume new_volume;
2302 /* 2. Since the sink's reference and real volumes are equal
2303 * now our ratios should be too. */
2304 pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2306 /* 3. Recalculate the new stream reference volume based on the
2307 * reference ratio and the sink's reference volume.
2309 * This basically calculates:
2311 * i->volume = s->reference_volume * i->reference_ratio
2313 * This is identical to propagate_reference_volume() */
2314 new_volume = s->reference_volume;
2315 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2316 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2317 pa_sink_input_set_volume_direct(i, &new_volume);
/* Recurse into filter sinks that share volume with this sink. */
2319 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2320 && PA_SINK_IS_LINKED(i->origin_sink->state))
2321 propagate_real_volume(i->origin_sink, old_real_volume);
2325 /* Something got changed in the hardware. It probably makes sense
2326 * to save changed hw settings given that hw volume changes not
2327 * triggered by PA are almost certainly done by the user. */
2328 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2329 s->save_volume = true;
2332 /* Called from io thread */
/* Asynchronously asks the main thread (via the thread mq's outq) to re-read
 * the hardware volume and mute state; handled in pa_sink_process_msg(). */
2333 void pa_sink_update_volume_and_mute(pa_sink *s) {
2335 pa_sink_assert_io_context(s);
2337 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2340 /* Called from main thread */
/* Returns the sink's reference volume, optionally refreshing the real volume
 * from the hardware first (when refresh_volume is set or force_refresh). */
2341 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
2342 pa_sink_assert_ref(s);
2343 pa_assert_ctl_context();
2344 pa_assert(PA_SINK_IS_LINKED(s->state));
2346 if (s->refresh_volume || force_refresh) {
2347 struct pa_cvolume old_real_volume;
2349 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2351 old_real_volume = s->real_volume;
/* Non-deferred path calls get_volume() directly (call elided in extract);
 * otherwise query the IO thread synchronously. */
2353 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2356 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
/* Re-derive soft volume/reference volume from the freshly read real volume. */
2358 update_real_volume(s, &s->real_volume, &s->channel_map);
2359 propagate_real_volume(s, &old_real_volume);
2362 return &s->reference_volume;
2365 /* Called from main thread. In volume sharing cases, only the root sink may
/* Sink-implementor notification hook: records the new hardware volume and
 * propagates it to reference volume and streams. */
2367 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2368 pa_cvolume old_real_volume;
2370 pa_sink_assert_ref(s);
2371 pa_assert_ctl_context();
2372 pa_assert(PA_SINK_IS_LINKED(s->state));
2373 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2375 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2377 old_real_volume = s->real_volume;
2378 update_real_volume(s, new_real_volume, &s->channel_map);
2379 propagate_real_volume(s, &old_real_volume);
2382 /* Called from main thread */
/* Sets the sink's mute state, pushes it to the driver and the IO thread, and
 * fires change notifications. NOTE(review): the early return for the
 * unchanged case and the 's->muted = mute' assignment are elided here. */
2383 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2386 pa_sink_assert_ref(s);
2387 pa_assert_ctl_context();
2389 old_muted = s->muted;
/* Unchanged mute: only upgrade the save flag (return is elided in extract). */
2391 if (mute == old_muted) {
2392 s->save_muted |= save;
2397 s->save_muted = save;
/* Non-deferred drivers get set_mute() called directly from the main thread;
 * the in-progress flag stops pa_sink_mute_changed() from re-entering. */
2399 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
2400 s->set_mute_in_progress = true;
2402 s->set_mute_in_progress = false;
2405 if (!PA_SINK_IS_LINKED(s->state))
2408 pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
/* Sync the new state to the IO thread, then notify subscribers and hooks. */
2409 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2410 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2411 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2414 /* Called from main thread */
/* Returns the current mute state, optionally refreshing it from the driver
 * first. NOTE(review): the final 'return s->muted;' is elided in this extract. */
2415 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2417 pa_sink_assert_ref(s);
2418 pa_assert_ctl_context();
2419 pa_assert(PA_SINK_IS_LINKED(s->state));
2421 if ((s->refresh_muted || force_refresh) && s->get_mute) {
/* Deferred-volume sinks query get_mute() in the IO thread via a message;
 * otherwise call the driver callback directly. */
2424 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2425 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2426 pa_sink_mute_changed(s, mute);
2428 if (s->get_mute(s, &mute) >= 0)
2429 pa_sink_mute_changed(s, mute);
2436 /* Called from main thread */
/* Sink-implementor notification that the hardware mute changed; re-enters
 * pa_sink_set_mute() with save=true unless that call is already in progress. */
2437 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2438 pa_sink_assert_ref(s);
2439 pa_assert_ctl_context();
2440 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Ignore notifications caused by our own set_mute() driver call. */
2442 if (s->set_mute_in_progress)
2445 /* pa_sink_set_mute() does this same check, so this may appear redundant,
2446 * but we must have this here also, because the save parameter of
2447 * pa_sink_set_mute() would otherwise have unintended side effects (saving
2448 * the mute state when it shouldn't be saved). */
2449 if (new_muted == s->muted)
2452 pa_sink_set_mute(s, new_muted, true);
2455 /* Called from main thread */
/* Merges property list 'p' into the sink's proplist according to 'mode' and,
 * if the sink is linked, fires the proplist-changed hook and subscription event. */
2456 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2457 pa_sink_assert_ref(s);
2458 pa_assert_ctl_context();
2461 pa_proplist_update(s->proplist, mode, p);
2463 if (PA_SINK_IS_LINKED(s->state)) {
2464 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2465 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2471 /* Called from main thread */
2472 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Sets (or, with NULL, unsets) the human-readable device description and keeps
 * the monitor source's description in sync. */
2473 void pa_sink_set_description(pa_sink *s, const char *description) {
2475 pa_sink_assert_ref(s);
2476 pa_assert_ctl_context();
/* Nothing to do when clearing an already-absent description. */
2478 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2481 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
/* No change: skip the update (return elided in this extract). */
2483 if (old && description && pa_streq(old, description))
2487 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2489 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
/* Mirror the description onto the monitor source (xfree of 'n' elided). */
2491 if (s->monitor_source) {
2494 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2495 pa_source_set_description(s->monitor_source, n);
2499 if (PA_SINK_IS_LINKED(s->state)) {
2500 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2501 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2505 /* Called from main thread */
/* Counts everything linked to this sink: all sink inputs plus everything
 * linked to the monitor source (unlike pa_sink_used_by()). */
2506 unsigned pa_sink_linked_by(pa_sink *s) {
2509 pa_sink_assert_ref(s);
2510 pa_assert_ctl_context();
2511 pa_assert(PA_SINK_IS_LINKED(s->state));
2513 ret = pa_idxset_size(s->inputs);
2515 /* We add in the number of streams connected to us here. Please
2516 * note the asymmetry to pa_sink_used_by()! */
2518 if (s->monitor_source)
2519 ret += pa_source_linked_by(s->monitor_source);
2524 /* Called from main thread */
/* Counts actively playing streams: all sink inputs minus the corked ones.
 * Monitor-source streams deliberately do not count (see pa_sink_linked_by()). */
2525 unsigned pa_sink_used_by(pa_sink *s) {
2528 pa_sink_assert_ref(s);
2529 pa_assert_ctl_context();
2530 pa_assert(PA_SINK_IS_LINKED(s->state));
2532 ret = pa_idxset_size(s->inputs);
2533 pa_assert(ret >= s->n_corked);
2535 /* Streams connected to our monitor source do not matter for
2536 * pa_sink_used_by()!.*/
2538 return ret - s->n_corked;
2541 /* Called from main thread */
/* Counts the streams that should keep this sink from auto-suspending,
 * skipping 'ignore_input'/'ignore_output', unlinked/corked inputs, and inputs
 * flagged DONT_INHIBIT_AUTO_SUSPEND. NOTE(review): the 'ret++' accumulation
 * and 'continue' statements are elided in this extract. */
2542 unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
2547 pa_sink_assert_ref(s);
2548 pa_assert_ctl_context();
2550 if (!PA_SINK_IS_LINKED(s->state))
2555 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2556 pa_sink_input_state_t st;
2558 if (i == ignore_input)
2561 st = pa_sink_input_get_state(i);
2563 /* We do not assert here. It is perfectly valid for a sink input to
2564 * be in the INIT state (i.e. created, marked done but not yet put)
2565 * and we should not care if it's unlinked as it won't contribute
2566 * towards our busy status.
2568 if (!PA_SINK_INPUT_IS_LINKED(st))
2571 if (st == PA_SINK_INPUT_CORKED)
2574 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
/* Also count streams reading from the monitor source. */
2580 if (s->monitor_source)
2581 ret += pa_source_check_suspend(s->monitor_source, ignore_output)
/* Maps a pa_sink_state_t value to its printable name; aborts on an
 * unrecognized value (the switch header line is elided in this extract). */
2586 const char *pa_sink_state_to_string(pa_sink_state_t state) {
2588 case PA_SINK_INIT: return "INIT";
2589 case PA_SINK_IDLE: return "IDLE";
2590 case PA_SINK_RUNNING: return "RUNNING";
2591 case PA_SINK_SUSPENDED: return "SUSPENDED";
2592 case PA_SINK_UNLINKED: return "UNLINKED";
2593 case PA_SINK_INVALID_STATE: return "INVALID_STATE";
2596 pa_assert_not_reached();
2599 /* Called from the IO thread */
/* Copies each input's main-thread soft volume into its thread_info copy and
 * requests a rewind so the new volume takes effect on already-rendered audio. */
2600 static void sync_input_volumes_within_thread(pa_sink *s) {
2604 pa_sink_assert_ref(s);
2605 pa_sink_assert_io_context(s);
2607 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
/* Skip inputs whose IO-thread volume is already up to date. */
2608 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2611 i->thread_info.soft_volume = i->soft_volume;
2612 pa_sink_input_request_rewind(i, 0, true, false, false);
2616 /* Called from the IO thread. Only called for the root sink in volume sharing
2617 * cases, except for internal recursive calls. */
/* Applies the shared volume on this sink (via the SET_VOLUME_SYNCED message
 * handler) and recurses into volume-sharing filter sinks attached to it. */
2618 static void set_shared_volume_within_thread(pa_sink *s) {
2619 pa_sink_input *i = NULL;
2622 pa_sink_assert_ref(s);
/* Dispatch directly (same thread) instead of going through the msgq. */
2624 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2626 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2627 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2628 set_shared_volume_within_thread(i->origin_sink);
2632 /* Called from IO thread, except when it is not */
/* Central message handler for the sink's asyncmsgq: add/remove/move of sink
 * inputs, volume/mute synchronization, state changes, and latency queries.
 * NOTE(review): this extract elides many lines (break/return statements,
 * 'else' lines, closing braces, local declarations) — the annotated flow below
 * should be confirmed against upstream sink.c before any code change. */
2633 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2634 pa_sink *s = PA_SINK(o);
2635 pa_sink_assert_ref(s);
2637 switch ((pa_sink_message_t) code) {
2639 case PA_SINK_MESSAGE_ADD_INPUT: {
2640 pa_sink_input *i = PA_SINK_INPUT(userdata);
2642 /* If you change anything here, make sure to change the
2643 * sink input handling a few lines down at
2644 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2646 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2648 /* Since the caller sleeps in pa_sink_input_put(), we can
2649 * safely access data outside of thread_info even though
2652 if ((i->thread_info.sync_prev = i->sync_prev)) {
2653 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2654 pa_assert(i->sync_prev->sync_next == i);
2655 i->thread_info.sync_prev->thread_info.sync_next = i;
2658 if ((i->thread_info.sync_next = i->sync_next)) {
2659 pa_assert(i->sink == i->thread_info.sync_next->sink);
2660 pa_assert(i->sync_next->sync_prev == i);
2661 i->thread_info.sync_next->thread_info.sync_prev = i;
2664 pa_sink_input_attach(i);
2666 pa_sink_input_set_state_within_thread(i, i->state);
2668 /* The requested latency of the sink input needs to be fixed up and
2669 * then configured on the sink. If this causes the sink latency to
2670 * go down, the sink implementor is responsible for doing a rewind
2671 * in the update_requested_latency() callback to ensure that the
2672 * sink buffer doesn't contain more data than what the new latency
2675 * XXX: Does it really make sense to push this responsibility to
2676 * the sink implementors? Wouldn't it be better to do it once in
2677 * the core than many times in the modules? */
2679 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2680 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2682 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2683 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2685 /* We don't rewind here automatically. This is left to the
2686 * sink input implementor because some sink inputs need a
2687 * slow start, i.e. need some time to buffer client
2688 * samples before beginning streaming.
2690 * XXX: Does it really make sense to push this functionality to
2691 * the sink implementors? Wouldn't it be better to do it once in
2692 * the core than many times in the modules? */
2694 /* In flat volume mode we need to update the volume as
2696 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2699 case PA_SINK_MESSAGE_REMOVE_INPUT: {
2700 pa_sink_input *i = PA_SINK_INPUT(userdata);
2702 /* If you change anything here, make sure to change the
2703 * sink input handling a few lines down at
2704 * PA_SINK_MESSAGE_START_MOVE, too. */
2706 pa_sink_input_detach(i);
2708 pa_sink_input_set_state_within_thread(i, i->state);
2710 /* Since the caller sleeps in pa_sink_input_unlink(),
2711 * we can safely access data outside of thread_info even
2712 * though it is mutable */
2714 pa_assert(!i->sync_prev);
2715 pa_assert(!i->sync_next);
/* Unchain this input from the IO-thread-side sync list. */
2717 if (i->thread_info.sync_prev) {
2718 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2719 i->thread_info.sync_prev = NULL;
2722 if (i->thread_info.sync_next) {
2723 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2724 i->thread_info.sync_next = NULL;
2727 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2728 pa_sink_invalidate_requested_latency(s, true);
2729 pa_sink_request_rewind(s, (size_t) -1);
2731 /* In flat volume mode we need to update the volume as
2733 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2736 case PA_SINK_MESSAGE_START_MOVE: {
2737 pa_sink_input *i = PA_SINK_INPUT(userdata);
2739 /* We don't support moving synchronized streams. */
2740 pa_assert(!i->sync_prev);
2741 pa_assert(!i->sync_next);
2742 pa_assert(!i->thread_info.sync_next);
2743 pa_assert(!i->thread_info.sync_prev);
2745 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2747 size_t sink_nbytes, total_nbytes;
2749 /* The old sink probably has some audio from this
2750 * stream in its buffer. We want to "take it back" as
2751 * much as possible and play it to the new sink. We
2752 * don't know at this point how much the old sink can
2753 * rewind. We have to pick something, and that
2754 * something is the full latency of the old sink here.
2755 * So we rewind the stream buffer by the sink latency
2756 * amount, which may be more than what we should
2757 * rewind. This can result in a chunk of audio being
2758 * played both to the old sink and the new sink.
2760 * FIXME: Fix this code so that we don't have to make
2761 * guesses about how much the sink will actually be
2762 * able to rewind. If someone comes up with a solution
2763 * for this, something to note is that the part of the
2764 * latency that the old sink couldn't rewind should
2765 * ideally be compensated after the stream has moved
2766 * to the new sink by adding silence. The new sink
2767 * most likely can't start playing the moved stream
2768 * immediately, and that gap should be removed from
2769 * the "compensation silence" (at least at the time of
2770 * writing this, the move finish code will actually
2771 * already take care of dropping the new sink's
2772 * unrewindable latency, so taking into account the
2773 * unrewindable latency of the old sink is the only
2776 * The render_memblockq contents are discarded,
2777 * because when the sink changes, the format of the
2778 * audio stored in the render_memblockq may change
2779 * too, making the stored audio invalid. FIXME:
2780 * However, the read and write indices are moved back
2781 * the same amount, so if they are not the same now,
2782 * they won't be the same after the rewind either. If
2783 * the write index of the render_memblockq is ahead of
2784 * the read index, then the render_memblockq will feed
2785 * the new sink some silence first, which it shouldn't
2786 * do. The write index should be flushed to be the
2787 * same as the read index. */
2789 /* Get the latency of the sink */
2790 usec = pa_sink_get_latency_within_thread(s, false);
2791 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2792 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
2794 if (total_nbytes > 0) {
/* Convert sink-side bytes back to stream-side bytes through the resampler. */
2795 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
2796 i->thread_info.rewrite_flush = true;
2797 pa_sink_input_process_rewind(i, sink_nbytes);
2801 pa_sink_input_detach(i);
2803 /* Let's remove the sink input ...*/
2804 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2806 pa_sink_invalidate_requested_latency(s, true);
2808 pa_log_debug("Requesting rewind due to started move");
2809 pa_sink_request_rewind(s, (size_t) -1);
2811 /* In flat volume mode we need to update the volume as
2813 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2816 case PA_SINK_MESSAGE_FINISH_MOVE: {
2817 pa_sink_input *i = PA_SINK_INPUT(userdata);
2819 /* We don't support moving synchronized streams. */
2820 pa_assert(!i->sync_prev);
2821 pa_assert(!i->sync_next);
2822 pa_assert(!i->thread_info.sync_next);
2823 pa_assert(!i->thread_info.sync_prev);
2825 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2827 pa_sink_input_attach(i);
2829 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2833 /* In the ideal case the new sink would start playing
2834 * the stream immediately. That requires the sink to
2835 * be able to rewind all of its latency, which usually
2836 * isn't possible, so there will probably be some gap
2837 * before the moved stream becomes audible. We then
2838 * have two possibilities: 1) start playing the stream
2839 * from where it is now, or 2) drop the unrewindable
2840 * latency of the sink from the stream. With option 1
2841 * we won't lose any audio but the stream will have a
2842 * pause. With option 2 we may lose some audio but the
2843 * stream time will be somewhat in sync with the wall
2844 * clock. Lennart seems to have chosen option 2 (one
2845 * of the reasons might have been that option 1 is
2846 * actually much harder to implement), so we drop the
2847 * latency of the new sink from the moved stream and
2848 * hope that the sink will undo most of that in the
2851 /* Get the latency of the sink */
2852 usec = pa_sink_get_latency_within_thread(s, false);
2853 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2856 pa_sink_input_drop(i, nbytes);
2858 pa_log_debug("Requesting rewind due to finished move");
2859 pa_sink_request_rewind(s, nbytes);
2862 /* Updating the requested sink latency has to be done
2863 * after the sink rewind request, not before, because
2864 * otherwise the sink may limit the rewind amount
2867 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2868 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2870 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2871 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2873 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2876 case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
2877 pa_sink *root_sink = pa_sink_get_master(s);
2879 if (PA_LIKELY(root_sink))
2880 set_shared_volume_within_thread(root_sink);
2885 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
/* Deferred-volume path: queue a smooth HW volume change before applying
 * the soft volume below. */
2887 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2889 pa_sink_volume_change_push(s);
2891 /* Fall through ... */
2893 case PA_SINK_MESSAGE_SET_VOLUME:
2895 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2896 s->thread_info.soft_volume = s->soft_volume;
2897 pa_sink_request_rewind(s, (size_t) -1);
2900 /* Fall through ... */
2902 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2903 sync_input_volumes_within_thread(s);
2906 case PA_SINK_MESSAGE_GET_VOLUME:
2908 if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
2910 pa_sink_volume_change_flush(s);
2911 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2914 /* In case sink implementor reset SW volume. */
2915 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2916 s->thread_info.soft_volume = s->soft_volume;
2917 pa_sink_request_rewind(s, (size_t) -1);
2922 case PA_SINK_MESSAGE_SET_MUTE:
2924 if (s->thread_info.soft_muted != s->muted) {
2925 s->thread_info.soft_muted = s->muted;
2926 pa_sink_request_rewind(s, (size_t) -1);
2929 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2934 case PA_SINK_MESSAGE_GET_MUTE:
2936 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2937 return s->get_mute(s, userdata);
2941 case PA_SINK_MESSAGE_SET_STATE: {
/* True when crossing the suspended/opened boundary in either direction. */
2943 bool suspend_change =
2944 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2945 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2947 s->thread_info.state = PA_PTR_TO_UINT(userdata);
/* A suspended sink has no pending rewind. */
2949 if (s->thread_info.state == PA_SINK_SUSPENDED) {
2950 s->thread_info.rewind_nbytes = 0;
2951 s->thread_info.rewind_requested = false;
2954 if (suspend_change) {
2958 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2959 if (i->suspend_within_thread)
2960 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2966 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2968 pa_usec_t *usec = userdata;
2969 *usec = pa_sink_get_requested_latency_within_thread(s);
2971 /* Yes, that's right, the IO thread will see -1 when no
2972 * explicit requested latency is configured, the main
2973 * thread will see max_latency */
2974 if (*usec == (pa_usec_t) -1)
2975 *usec = s->thread_info.max_latency;
2980 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2981 pa_usec_t *r = userdata;
2983 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2988 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2989 pa_usec_t *r = userdata;
2991 r[0] = s->thread_info.min_latency;
2992 r[1] = s->thread_info.max_latency;
2997 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2999 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
3002 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
3004 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
3007 case PA_SINK_MESSAGE_GET_MAX_REWIND:
3009 *((size_t*) userdata) = s->thread_info.max_rewind;
3012 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
3014 *((size_t*) userdata) = s->thread_info.max_request;
3017 case PA_SINK_MESSAGE_SET_MAX_REWIND:
3019 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
3022 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
3024 pa_sink_set_max_request_within_thread(s, (size_t) offset);
3027 case PA_SINK_MESSAGE_SET_PORT:
3029 pa_assert(userdata);
3031 struct sink_message_set_port *msg_data = userdata;
3032 msg_data->ret = s->set_port(s, msg_data->port);
3036 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
3037 /* This message is sent from IO-thread and handled in main thread. */
3038 pa_assert_ctl_context();
3040 /* Make sure we're not messing with main thread when no longer linked */
3041 if (!PA_SINK_IS_LINKED(s->state))
3044 pa_sink_get_volume(s, true);
3045 pa_sink_get_mute(s, true);
3048 case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
3049 s->thread_info.port_latency_offset = offset;
3052 case PA_SINK_MESSAGE_GET_LATENCY:
3053 case PA_SINK_MESSAGE_MAX:
3060 /* Called from main thread */
/* Suspends or resumes all sinks of the core for the given cause; collects the
 * first failure into the return value (accumulation line elided in extract). */
3061 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
3066 pa_core_assert_ref(c);
3067 pa_assert_ctl_context();
3068 pa_assert(cause != 0);
3070 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
3073 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
3080 /* Called from IO thread */
/* Detaches all sink inputs and the monitor source from the IO thread, e.g.
 * around a driver reconfiguration. */
3081 void pa_sink_detach_within_thread(pa_sink *s) {
3085 pa_sink_assert_ref(s);
3086 pa_sink_assert_io_context(s);
3087 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3089 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3090 pa_sink_input_detach(i);
3092 if (s->monitor_source)
3093 pa_source_detach_within_thread(s->monitor_source);
3096 /* Called from IO thread */
/* Counterpart of pa_sink_detach_within_thread(): re-attaches all sink inputs
 * and the monitor source. */
3097 void pa_sink_attach_within_thread(pa_sink *s) {
3101 pa_sink_assert_ref(s);
3102 pa_sink_assert_io_context(s);
3103 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3105 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3106 pa_sink_input_attach(i);
3108 if (s->monitor_source)
3109 pa_source_attach_within_thread(s->monitor_source);
3112 /* Called from IO thread */
/* Requests that the sink rewind its playback buffer by up to 'nbytes'
 * ((size_t)-1 means "as much as possible", clamped to max_rewind). Redundant
 * smaller requests are coalesced into an already-pending larger one. */
3113 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
3114 pa_sink_assert_ref(s);
3115 pa_sink_assert_io_context(s);
3116 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3118 if (nbytes == (size_t) -1)
3119 nbytes = s->thread_info.max_rewind;
3121 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
/* A pending request already covers this amount: nothing to do
 * (early return elided in this extract). */
3123 if (s->thread_info.rewind_requested &&
3124 nbytes <= s->thread_info.rewind_nbytes)
3127 s->thread_info.rewind_nbytes = nbytes;
3128 s->thread_info.rewind_requested = true;
/* Notify the sink implementor, if it wants to act on rewind requests. */
3130 if (s->request_rewind)
3131 s->request_rewind(s);
3134 /* Called from IO thread */
/* Computes the effective requested latency: the minimum over all sink inputs
 * and the monitor source, clamped to [min_latency, max_latency]; (pa_usec_t)-1
 * when nobody requested anything. Result is cached once the sink is linked. */
3135 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
3136 pa_usec_t result = (pa_usec_t) -1;
3139 pa_usec_t monitor_latency;
3141 pa_sink_assert_ref(s);
3142 pa_sink_assert_io_context(s);
/* Fixed-latency sinks ignore per-stream requests entirely. */
3144 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3145 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
3147 if (s->thread_info.requested_latency_valid)
3148 return s->thread_info.requested_latency;
/* Take the smallest latency any attached input asked for. */
3150 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3151 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3152 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3153 result = i->thread_info.requested_sink_latency;
3155 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3157 if (monitor_latency != (pa_usec_t) -1 &&
3158 (result == (pa_usec_t) -1 || result > monitor_latency))
3159 result = monitor_latency;
3161 if (result != (pa_usec_t) -1)
3162 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3164 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3165 /* Only cache if properly initialized */
3166 s->thread_info.requested_latency = result;
3167 s->thread_info.requested_latency_valid = true;
3173 /* Called from main thread */
/* Main-thread accessor for the requested latency; queries the IO thread
 * synchronously unless the sink is suspended. */
3174 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3177 pa_sink_assert_ref(s);
3178 pa_assert_ctl_context();
3179 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Suspended sinks take a shortcut (branch body elided in this extract). */
3181 if (s->state == PA_SINK_SUSPENDED)
3184 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3189 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Updates max_rewind and fans the new value out to all attached inputs and
 * the monitor source. No-op when the value is unchanged. */
3190 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3194 pa_sink_assert_ref(s);
3195 pa_sink_assert_io_context(s);
3197 if (max_rewind == s->thread_info.max_rewind)
3200 s->thread_info.max_rewind = max_rewind;
3202 if (PA_SINK_IS_LINKED(s->thread_info.state))
3203 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3204 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3206 if (s->monitor_source)
3207 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3210 /* Called from main thread */
/* Main-thread wrapper: routes the update through the IO thread when linked,
 * applies it directly otherwise (before the IO thread exists). */
3211 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3212 pa_sink_assert_ref(s);
3213 pa_assert_ctl_context();
3215 if (PA_SINK_IS_LINKED(s->state))
3216 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3218 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3221 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Updates max_request and propagates it to all attached sink inputs.
 * No-op when the value is unchanged. */
3222 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3225 pa_sink_assert_ref(s);
3226 pa_sink_assert_io_context(s);
3228 if (max_request == s->thread_info.max_request)
3231 s->thread_info.max_request = max_request;
3233 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3236 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3237 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3241 /* Called from main thread */
/* Main-thread wrapper mirroring pa_sink_set_max_rewind(): via IO thread when
 * linked, direct call otherwise. */
3242 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3243 pa_sink_assert_ref(s);
3244 pa_assert_ctl_context();
3246 if (PA_SINK_IS_LINKED(s->state))
3247 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3249 pa_sink_set_max_request_within_thread(s, max_request);
3252 /* Called from IO thread */
/* Drops the cached requested-latency value and notifies the sink implementor
 * plus all inputs so they can react. NOTE(review): the handling of the
 * 'dynamic' parameter appears partially elided in this extract — confirm. */
3253 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
3257 pa_sink_assert_ref(s);
3258 pa_sink_assert_io_context(s);
3260 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3261 s->thread_info.requested_latency_valid = false;
3265 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3267 if (s->update_requested_latency)
3268 s->update_requested_latency(s);
3270 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3271 if (i->update_sink_requested_latency)
3272 i->update_sink_requested_latency(i);
3276 /* Called from main thread */
/* Sets the sink's allowed latency range, clamping both bounds to the absolute
 * limits; 0 (or out-of-range) means "no limit". Routed through the IO thread
 * when the sink is linked. */
3277 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3278 pa_sink_assert_ref(s);
3279 pa_assert_ctl_context();
3281 /* min_latency == 0: no limit
3282 * min_latency anything else: specified limit
3284 * Similar for max_latency */
3286 if (min_latency < ABSOLUTE_MIN_LATENCY)
3287 min_latency = ABSOLUTE_MIN_LATENCY;
3289 if (max_latency <= 0 ||
3290 max_latency > ABSOLUTE_MAX_LATENCY)
3291 max_latency = ABSOLUTE_MAX_LATENCY;
3293 pa_assert(min_latency <= max_latency);
3295 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3296 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3297 max_latency == ABSOLUTE_MAX_LATENCY) ||
3298 (s->flags & PA_SINK_DYNAMIC_LATENCY));
/* Linked: marshal the pair into an array and send to the IO thread
 * (local array declaration 'r' is elided in this extract). */
3300 if (PA_SINK_IS_LINKED(s->state)) {
3306 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3308 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3311 /* Called from main thread */
/* Reads the current latency range: from the IO thread when linked, directly
 * from thread_info otherwise. */
3312 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3313 pa_sink_assert_ref(s);
3314 pa_assert_ctl_context();
3315 pa_assert(min_latency);
3316 pa_assert(max_latency);
3318 if (PA_SINK_IS_LINKED(s->state)) {
3319 pa_usec_t r[2] = { 0, 0 };
3321 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3323 *min_latency = r[0];
3324 *max_latency = r[1];
/* Unlinked: the IO thread isn't running yet, read thread_info directly. */
3326 *min_latency = s->thread_info.min_latency;
3327 *max_latency = s->thread_info.max_latency;
3331 /* Called from IO thread */
/* Applies a new latency range in the IO thread, notifies interested inputs,
 * invalidates the cached requested latency, and mirrors the range onto the
 * monitor source. */
3332 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3333 pa_sink_assert_ref(s);
3334 pa_sink_assert_io_context(s);
3336 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3337 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3338 pa_assert(min_latency <= max_latency);
3340 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3341 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3342 max_latency == ABSOLUTE_MAX_LATENCY) ||
3343 (s->flags & PA_SINK_DYNAMIC_LATENCY));
/* Unchanged range: nothing to do (return elided in this extract). */
3345 if (s->thread_info.min_latency == min_latency &&
3346 s->thread_info.max_latency == max_latency)
3349 s->thread_info.min_latency = min_latency;
3350 s->thread_info.max_latency = max_latency;
3352 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3356 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3357 if (i->update_sink_latency_range)
3358 i->update_sink_latency_range(i);
3361 pa_sink_invalidate_requested_latency(s, false);
3363 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3366 /* Called from main thread */
/* Configure a fixed (non-dynamic) latency for the sink. A sink with
 * PA_SINK_DYNAMIC_LATENCY must pass latency == 0 (fixed latency does not
 * apply). Otherwise the value is clamped to the absolute bounds, handed to
 * the IO thread via SET_FIXED_LATENCY when the sink is linked (or stored in
 * thread_info directly when not), and mirrored onto the monitor source.
 * NOTE(review): elided listing — the early return inside the DYNAMIC_LATENCY
 * branch, an "else" before the direct thread_info store, and closing braces
 * are missing from this excerpt. */
3367 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3368 pa_sink_assert_ref(s);
3369 pa_assert_ctl_context();
3371 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3372 pa_assert(latency == 0);
/* Clamp the requested latency into the globally allowed window. */
3376 if (latency < ABSOLUTE_MIN_LATENCY)
3377 latency = ABSOLUTE_MIN_LATENCY;
3379 if (latency > ABSOLUTE_MAX_LATENCY)
3380 latency = ABSOLUTE_MAX_LATENCY;
3382 if (PA_SINK_IS_LINKED(s->state))
3383 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3385 s->thread_info.fixed_latency = latency;
3387 pa_source_set_fixed_latency(s->monitor_source, latency);
3390 /* Called from main thread */
/* Return the sink's fixed latency. Dynamic-latency sinks have no fixed
 * latency (the elided branch presumably returns 0 — TODO confirm against the
 * full source). Linked sinks query the IO thread via GET_FIXED_LATENCY;
 * unlinked sinks read thread_info directly.
 * NOTE(review): elided listing — the local `latency` declaration, the early
 * return for PA_SINK_DYNAMIC_LATENCY, the "else" before the direct read, and
 * the final "return latency;" are missing from this excerpt. */
3391 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3394 pa_sink_assert_ref(s);
3395 pa_assert_ctl_context();
3397 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3400 if (PA_SINK_IS_LINKED(s->state))
3401 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3403 latency = s->thread_info.fixed_latency;
3408 /* Called from IO thread */
/* IO-thread half of fixed-latency configuration. For dynamic-latency sinks
 * the fixed latency is forced to 0 (and mirrored to the monitor source).
 * Otherwise, if the value actually changed, store it, notify all attached
 * sink inputs, invalidate the cached requested latency, and propagate to the
 * monitor source.
 * NOTE(review): elided listing — the early return closing the
 * DYNAMIC_LATENCY branch, the early return after the no-change check, the
 * declaration of `i`, and closing braces are missing from this excerpt. */
3409 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3410 pa_sink_assert_ref(s);
3411 pa_sink_assert_io_context(s);
3413 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3414 pa_assert(latency == 0);
3415 s->thread_info.fixed_latency = 0;
3417 if (s->monitor_source)
3418 pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
/* Non-dynamic path: callers must have clamped the value already. */
3423 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3424 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3426 if (s->thread_info.fixed_latency == latency)
3429 s->thread_info.fixed_latency = latency;
3431 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3435 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3436 if (i->update_sink_fixed_latency)
3437 i->update_sink_fixed_latency(i);
3440 pa_sink_invalidate_requested_latency(s, false);
3442 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3445 /* Called from main context */
/* Set the per-port latency offset (signed, may be negative). The main-thread
 * copy is updated first; linked sinks forward the value to the IO thread via
 * SET_PORT_LATENCY_OFFSET, unlinked sinks write thread_info directly. A hook
 * is fired so interested modules can react.
 * NOTE(review): elided listing — an "else" before the direct thread_info
 * store and the closing brace are missing from this excerpt. */
3446 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3447 pa_sink_assert_ref(s);
3449 s->port_latency_offset = offset;
3451 if (PA_SINK_IS_LINKED(s->state))
3452 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3454 s->thread_info.port_latency_offset = offset;
3456 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3459 /* Called from main context */
/* Return the maximum rewind the sink supports, in bytes. Unlinked sinks can
 * read thread_info directly; linked sinks must ask the IO thread via
 * GET_MAX_REWIND.
 * NOTE(review): elided listing — the local `r` (size_t) declaration, the
 * final "return r;", and the closing brace are missing from this excerpt. */
3460 size_t pa_sink_get_max_rewind(pa_sink *s) {
3462 pa_assert_ctl_context();
3463 pa_sink_assert_ref(s);
3465 if (!PA_SINK_IS_LINKED(s->state))
3466 return s->thread_info.max_rewind;
3468 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3473 /* Called from main context */
/* Return the maximum request size of the sink, in bytes — mirror image of
 * pa_sink_get_max_rewind(): direct thread_info read when unlinked, IO-thread
 * round-trip (GET_MAX_REQUEST) when linked.
 * NOTE(review): elided listing — the local `r` declaration, the final
 * "return r;", and the closing brace are missing from this excerpt. */
3474 size_t pa_sink_get_max_request(pa_sink *s) {
3476 pa_sink_assert_ref(s);
3477 pa_assert_ctl_context();
3479 if (!PA_SINK_IS_LINKED(s->state))
3480 return s->thread_info.max_request;
3482 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3487 /* Called from main context */
/* Switch the sink's active port to the port named `name`.
 * Returns 0 on success or a negative PA_ERR_* code:
 *   -PA_ERR_NOTIMPLEMENTED  sink has no set_port() callback (elided guard),
 *   -PA_ERR_NOENTITY        no such port, or the backend switch failed.
 * If the requested port is already active only the save flag is updated.
 * Deferred-volume sinks route the switch through the IO thread
 * (SET_PORT message) so it is serialized with volume writes; otherwise
 * s->set_port() is invoked directly. On success a subscription event and the
 * SINK_PORT_CHANGED hook are fired, the port's latency offset is applied and
 * the default sink is re-evaluated.
 * NOTE(review): elided listing — the guards checking s->set_port and `name`,
 * the `ret` declaration, the "ret = msg.ret;" after the message send, the
 * "if (ret < 0)" test, the early returns, and the final "return 0;" are
 * missing from this excerpt. */
3488 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3489 pa_device_port *port;
3492 pa_sink_assert_ref(s);
3493 pa_assert_ctl_context();
3496 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3497 return -PA_ERR_NOTIMPLEMENTED;
3501 return -PA_ERR_NOENTITY;
3503 if (!(port = pa_hashmap_get(s->ports, name)))
3504 return -PA_ERR_NOENTITY;
/* Re-selecting the already-active port: only latch the save flag. */
3506 if (s->active_port == port) {
3507 s->save_port = s->save_port || save;
3511 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
3512 struct sink_message_set_port msg = { .port = port, .ret = 0 };
3513 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
3517 ret = s->set_port(s, port);
3520 return -PA_ERR_NOENTITY;
3522 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3524 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3526 s->active_port = port;
3527 s->save_port = save;
3529 pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3531 /* The active port affects the default sink selection. */
3532 pa_core_update_default_sink(s->core);
3534 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
/* Derive a PA_PROP_DEVICE_ICON_NAME property from the device's form factor,
 * class, profile and bus properties, unless one is already set. `is_sink`
 * selects the fallback icon family for devices whose properties don't pin
 * one down (sink → speakers, source → microphone — the sink-side fallback
 * and several returns are elided from this listing).
 * Returns whether an icon name is present afterwards (presumably true when
 * one was set — TODO confirm against the full source).
 * NOTE(review): elided listing — several `t = ...` assignments (webcam,
 * computer, handset), the modem branch body, the is_sink fallback, the `s`
 * suffix assignments in the profile branch, early returns, and closing
 * braces are missing from this excerpt. */
3539 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3540 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
/* Respect an icon name chosen by the module or user. */
3544 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3547 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3549 if (pa_streq(ff, "microphone"))
3550 t = "audio-input-microphone";
3551 else if (pa_streq(ff, "webcam"))
3553 else if (pa_streq(ff, "computer"))
3555 else if (pa_streq(ff, "handset"))
3557 else if (pa_streq(ff, "portable"))
3558 t = "multimedia-player";
3559 else if (pa_streq(ff, "tv"))
3560 t = "video-display";
3563 * The following icons are not part of the icon naming spec,
3564 * because Rodney Dawes sucks as the maintainer of that spec.
3566 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3568 else if (pa_streq(ff, "headset"))
3569 t = "audio-headset";
3570 else if (pa_streq(ff, "headphone"))
3571 t = "audio-headphones";
3572 else if (pa_streq(ff, "speaker"))
3573 t = "audio-speakers";
3574 else if (pa_streq(ff, "hands-free"))
3575 t = "audio-handsfree";
3579 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3580 if (pa_streq(c, "modem"))
3587 t = "audio-input-microphone";
/* Profile name refines the icon with a connector suffix (analog/iec958/hdmi);
 * the assignments to `s` are elided here. */
3590 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3591 if (strstr(profile, "analog"))
3593 else if (strstr(profile, "iec958"))
3595 else if (strstr(profile, "hdmi"))
/* Final icon name: base + connector suffix + "-" + bus (if known). */
3599 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3601 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
/* Derive PA_PROP_DEVICE_DESCRIPTION for a device, unless already set:
 * card description, then form-factor/class specific strings (localized),
 * then the raw product name. If a profile description `k` exists it is
 * appended as "<description> <profile>".
 * Returns whether a description is present afterwards (presumably true when
 * one was set — TODO confirm against the full source).
 * NOTE(review): elided listing — early returns, the `d = s;` assignments,
 * the modem branch body, the "if (!d) return false;" style guard, and
 * closing braces are missing from this excerpt. */
3606 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3607 const char *s, *d = NULL, *k;
3610 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
/* Prefer the owning card's description when available. */
3614 if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3618 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3619 if (pa_streq(s, "internal"))
3620 d = _("Built-in Audio");
3623 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3624 if (pa_streq(s, "modem"))
3628 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3633 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3636 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3638 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
/* Tag telephony-style devices (handset/hands-free/headset form factors) with
 * PA_PROP_DEVICE_INTENDED_ROLES = "phone", unless roles are already set.
 * Returns whether the property is present afterwards — the return statements
 * are elided from this listing (TODO confirm against the full source).
 * NOTE(review): elided — the `s` declaration, early return, and closing
 * braces are missing from this excerpt. */
3643 bool pa_device_init_intended_roles(pa_proplist *p) {
3647 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3650 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3651 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3652 || pa_streq(s, "headset")) {
3653 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
/* Compute a heuristic routing priority for a device from its proplist:
 * device class, form factor, bus and profile each contribute (the actual
 * "priority += ..." amounts are elided from this listing). Higher values are
 * preferred when picking defaults.
 * NOTE(review): elided — the `s` declaration, all priority increments, the
 * final "return priority;", and closing braces are missing from this
 * excerpt. */
3660 unsigned pa_device_init_priority(pa_proplist *p) {
3662 unsigned priority = 0;
3666 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3668 if (pa_streq(s, "sound"))
3670 else if (!pa_streq(s, "modem"))
3674 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3676 if (pa_streq(s, "headphone"))
3678 else if (pa_streq(s, "hifi"))
3680 else if (pa_streq(s, "speaker"))
3682 else if (pa_streq(s, "portable"))
3686 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3688 if (pa_streq(s, "bluetooth"))
3690 else if (pa_streq(s, "usb"))
3692 else if (pa_streq(s, "pci"))
3696 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3698 if (pa_startswith(s, "analog-"))
3700 else if (pa_startswith(s, "iec958-"))
/* Lock-free free-list recycling pa_sink_volume_change structs between the
 * push/free paths below, to avoid malloc in the IO thread's hot path. */
3707 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3709 /* Called from the IO thread. */
/* Allocate (or recycle from the flist) a volume-change record, initialize its
 * list links and reset hw_volume for the sink's channel count.
 * NOTE(review): the final "return c;" is elided from this listing. */
3710 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3711 pa_sink_volume_change *c;
3712 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3713 c = pa_xnew(pa_sink_volume_change, 1);
3715 PA_LLIST_INIT(pa_sink_volume_change, c);
3717 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3721 /* Called from the IO thread. */
/* Return a volume-change record to the static flist; when the flist is full
 * the record is freed instead (the elided branch presumably calls
 * pa_xfree(c) — TODO confirm against the full source). */
3722 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3724 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3728 /* Called from the IO thread. */
/* Queue a deferred hardware-volume write so that it takes effect at the
 * moment the currently buffered audio is actually heard. The target HW
 * volume is derived as real_volume / soft_volume; the apply time is
 * now + sink latency + extra delay. Volume increases are nudged later and
 * decreases earlier by the safety margin, so transitions never clip audible
 * audio. The record is inserted into the time-ordered volume_changes list
 * and any already-queued changes scheduled after it are dropped.
 * NOTE(review): elided listing — the "Volume not changing" early return, the
 * loop break/else structure around the safety-margin adjustment, the
 * `direction` assignments ("up"/"down"), and several closing braces are
 * missing from this excerpt. */
3729 void pa_sink_volume_change_push(pa_sink *s) {
3730 pa_sink_volume_change *c = NULL;
3731 pa_sink_volume_change *nc = NULL;
3732 pa_sink_volume_change *pc = NULL;
3733 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3735 const char *direction = NULL;
3738 nc = pa_sink_volume_change_new(s);
3740 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3741 * Adding one more volume for HW would get us rid of this, but I am trying
3742 * to survive with the ones we already have. */
3743 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
/* Nothing queued and HW volume already matches: drop the record. */
3745 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3746 pa_log_debug("Volume not changing");
3747 pa_sink_volume_change_free(nc);
/* Schedule at the time the currently buffered audio is played out. */
3751 nc->at = pa_sink_get_latency_within_thread(s, false);
3752 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3754 if (s->thread_info.volume_changes_tail) {
3755 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3756 /* If volume is going up let's do it a bit late. If it is going
3757 * down let's do it a bit early. */
3758 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3759 if (nc->at + safety_margin > c->at) {
3760 nc->at += safety_margin;
3765 else if (nc->at - safety_margin > c->at) {
3766 nc->at -= safety_margin;
/* Empty queue: compare against the current HW volume instead. */
3774 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3775 nc->at += safety_margin;
3778 nc->at -= safety_margin;
3781 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3784 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3787 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3789 /* We can ignore volume events that came earlier but should happen later than this. */
3790 PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
3791 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3792 pa_sink_volume_change_free(c);
/* New record is now the last scheduled change. */
3795 s->thread_info.volume_changes_tail = nc;
3798 /* Called from the IO thread. */
/* Discard all queued volume changes without applying them: detach the list,
 * then walk it freeing each record.
 * NOTE(review): the "while (c) {" loop header, the "c = next;" step, and the
 * closing braces are elided from this listing. */
3799 static void pa_sink_volume_change_flush(pa_sink *s) {
3800 pa_sink_volume_change *c = s->thread_info.volume_changes;
3802 s->thread_info.volume_changes = NULL;
3803 s->thread_info.volume_changes_tail = NULL;
3805 pa_sink_volume_change *next = c->next;
3806 pa_sink_volume_change_free(c);
3811 /* Called from the IO thread. */
/* Apply every queued volume change whose scheduled time has passed: pop it,
 * record it as the current HW volume, and (in elided code) invoke
 * s->write_volume() to push it to the hardware. If changes remain,
 * *usec_to_next is set to the delay until the next one so the caller can
 * re-arm its timer. Returns whether anything was applied (the `ret` local,
 * its updates, and the final return are elided from this listing — TODO
 * confirm against the full source).
 * NOTE(review): also elided — the "*usec_to_next = 0;" early-out in the
 * unlinked branch, the write_volume invocation inside the loop, the
 * "if (usec_to_next)" guards, and the else branch that clears the tail
 * pointer. */
3812 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
/* Nothing to do when no changes are queued or the sink is not linked. */
3818 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
/* Deferred volume requires a write_volume() implementation. */
3824 pa_assert(s->write_volume);
3826 now = pa_rtclock_now();
3828 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3829 pa_sink_volume_change *c = s->thread_info.volume_changes;
3830 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3831 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3832 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3834 s->thread_info.current_hw_volume = c->hw_volume;
3835 pa_sink_volume_change_free(c);
3841 if (s->thread_info.volume_changes) {
3843 *usec_to_next = s->thread_info.volume_changes->at - now;
3844 if (pa_log_ratelimit(PA_LOG_DEBUG))
3845 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
/* Queue drained: no tail remains. */
3850 s->thread_info.volume_changes_tail = NULL;
3855 /* Called from the IO thread. */
/* After a stream rewind of `nbytes`, audio scheduled for later will now play
 * sooner, so pull queued volume changes forward: any change scheduled beyond
 * the (safety-margin-adjusted) current-latency horizon is shifted earlier by
 * the rewound duration, clamped so it never lands before the horizon.
 * Finally, changes that became due are applied immediately.
 * NOTE(review): elided listing — the "else" before the `modified_limit +=`
 * line, the "c->at -= rewound;" inside the `c->at > modified_limit` branch,
 * and closing braces are missing from this excerpt; `rewound` is computed
 * but its use is in the elided line. */
3856 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3857 /* All the queued volume events later than current latency are shifted to happen earlier. */
3858 pa_sink_volume_change *c;
3859 pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3860 pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3861 pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
3863 pa_log_debug("latency = %lld", (long long) limit);
3864 limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3866 PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3867 pa_usec_t modified_limit = limit;
/* Mirror the push-time bias: decreases early, increases late. */
3868 if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3869 modified_limit -= s->thread_info.volume_change_safety_margin;
3871 modified_limit += s->thread_info.volume_change_safety_margin;
3872 if (c->at > modified_limit) {
3874 if (c->at < modified_limit)
3875 c->at = modified_limit;
3877 prev_vol = pa_cvolume_avg(&c->hw_volume);
3879 pa_sink_volume_change_apply(s, NULL);
3882 /* Called from the main thread */
3883 /* Gets the list of formats supported by the sink. The members and idxset must
3884 * be freed by the caller. */
/* If the sink implements get_formats() that list is returned; otherwise a
 * single PCM pa_format_info is synthesized as the assumed capability.
 * NOTE(review): the `ret` declaration, the final "return ret;", and closing
 * braces are elided from this listing. */
3885 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3890 if (s->get_formats) {
3891 /* Sink supports format query, all is good */
3892 ret = s->get_formats(s);
3894 /* Sink doesn't support format query, so assume it does PCM */
3895 pa_format_info *f = pa_format_info_new();
3896 f->encoding = PA_ENCODING_PCM;
3898 ret = pa_idxset_new(NULL, NULL);
3899 pa_idxset_put(ret, f, NULL);
3905 /* Called from the main thread */
3906 /* Allows an external source to set what formats a sink supports if the sink
3907 * permits this. The function makes a copy of the formats on success. */
/* Delegates to the sink's set_formats() callback when present; the guard
 * that checks for a missing callback and the bail-out return value are
 * elided from this listing (presumably false — TODO confirm). */
3908 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3913 /* Sink supports setting formats -- let's give it a shot */
3914 return s->set_formats(s, formats);
3916 /* Sink doesn't support setting this -- bail out */
3920 /* Called from the main thread */
3921 /* Checks if the sink can accept this format */
/* Walks the sink's supported-format set and succeeds on the first entry
 * compatible with `f`. The supported set is freed (deep, via
 * pa_format_info_free) before returning.
 * NOTE(review): elided listing — the `ret`/`i` declarations, the "ret = true;
 * break;" inside the match branch, and the final return are missing from
 * this excerpt. */
3922 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3923 pa_idxset *formats = NULL;
3929 formats = pa_sink_get_formats(s);
3932 pa_format_info *finfo_device;
3935 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3936 if (pa_format_info_is_compatible(finfo_device, f)) {
3942 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3948 /* Called from the main thread */
3949 /* Calculates the intersection between formats supported by the sink and
3950 * in_formats, and returns these, in the order of the sink's formats. */
/* Nested loop: for every sink-supported format, copy each compatible entry
 * of in_formats into out_formats (copies — caller owns the result). The
 * sink's own format set is freed before returning.
 * NOTE(review): elided listing — the `i`/`j` declarations, the "goto done;"
 * or early-exit after the empty-input check, the "done:" label, and the
 * final "return out_formats;" are missing from this excerpt. */
3951 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3952 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3953 pa_format_info *f_sink, *f_in;
/* Empty input: intersection is trivially empty. */
3958 if (!in_formats || pa_idxset_isempty(in_formats))
3961 sink_formats = pa_sink_get_formats(s);
3963 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3964 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3965 if (pa_format_info_is_compatible(f_sink, f_in))
3966 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3972 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3977 /* Called from the main thread. */
3978 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
3979 pa_cvolume old_volume;
3980 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3981 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3986 old_volume = s->reference_volume;
3988 if (pa_cvolume_equal(volume, &old_volume))
3991 s->reference_volume = *volume;
3992 pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
3993 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
3994 s->flags & PA_SINK_DECIBEL_VOLUME),
3995 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
3996 s->flags & PA_SINK_DECIBEL_VOLUME));
3998 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3999 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);