2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
32 #include <pulse/introspect.h>
33 #include <pulse/format.h>
34 #include <pulse/utf8.h>
35 #include <pulse/xmalloc.h>
36 #include <pulse/timeval.h>
37 #include <pulse/util.h>
38 #include <pulse/rtclock.h>
39 #include <pulse/internal.h>
41 #include <pulsecore/i18n.h>
42 #include <pulsecore/sink-input.h>
43 #include <pulsecore/namereg.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/mix.h>
47 #include <pulsecore/core-subscribe.h>
48 #include <pulsecore/log.h>
49 #include <pulsecore/macro.h>
50 #include <pulsecore/play-memblockq.h>
51 #include <pulsecore/flist.h>
/* Tuning constants for the sink mixing/latency machinery.
 * Latency values are in microseconds (PA_USEC_* based); ABSOLUTE_MIN_LATENCY's
 * unit is presumably usec as well — TODO confirm against the full file. */
55 #define MAX_MIX_CHANNELS 32
56 #define MIX_BUFFER_LENGTH (pa_page_size())
57 #define ABSOLUTE_MIN_LATENCY (500)
58 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
59 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* Registers pa_sink as a public subclass of pa_msgobject (reference counting +
 * message dispatch support). */
61 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
/* A single queued hardware volume change; chained in a doubly-linked list on
 * the sink's thread_info (used by the deferred-volume code below).
 * NOTE(review): additional fields of this struct are not visible in this chunk. */
63 struct pa_sink_volume_change {
67 PA_LLIST_FIELDS(pa_sink_volume_change);
/* Message payload for PA_SINK_MESSAGE_SET_PORT when deferred volume is in use
 * (see pa_sink_suspend below). Fields not fully visible here. */
70 struct sink_message_set_port {
/* Forward declarations for the private helpers defined later in this file. */
75 static void sink_free(pa_object *s);
77 static void pa_sink_volume_change_push(pa_sink *s);
78 static void pa_sink_volume_change_flush(pa_sink *s);
79 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/* (Tizen extension) Dump the PCM data rendered by this sink to a raw file for
 * debugging. Opens the dump file lazily when the core's pcm_dump flags enable
 * PA_PCM_DUMP_PA_SINK and the sink is RUNNING; closes it again when the flag
 * is cleared; otherwise appends the given chunk to the already-open file.
 * Called with the chunk about to be played — presumably from the IO thread,
 * TODO confirm from callers not visible in this chunk. */
82 static void pa_sink_write_pcm_dump(pa_sink *s, pa_memchunk *chunk)
84 char *dump_time = NULL, *dump_path_surfix = NULL;
85 const char *s_device_api_str, *card_name_str, *device_idx_str;
90 /* open file for dump pcm */
91 if (s->core->pcm_dump & PA_PCM_DUMP_PA_SINK && !s->pcm_dump_fp && s->state == PA_SINK_RUNNING) {
/* Timestamp (HHMMSS.mmm, local time) used to make the dump filename unique. */
92 pa_gettimeofday(&now);
93 localtime_r(&now.tv_sec, &tm);
94 memset(&datetime[0], 0x00, sizeof(datetime));
95 strftime(&datetime[0], sizeof(datetime), "%H%M%S", &tm);
96 dump_time = pa_sprintf_malloc("%s.%03ld", &datetime[0], now.tv_usec / 1000);
/* Build a filename suffix describing the device: for ALSA sinks use
 * "<card name>.<device index>", otherwise the device API string, and as a
 * last resort the sink name. */
98 if ((s_device_api_str = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_API))) {
99 if (pa_streq(s_device_api_str, "alsa")) {
100 card_name_str = pa_proplist_gets(s->proplist, "alsa.card_name");
101 device_idx_str = pa_proplist_gets(s->proplist, "alsa.device");
102 dump_path_surfix = pa_sprintf_malloc("%s.%s", pa_strnull(card_name_str), pa_strnull(device_idx_str));
104 dump_path_surfix = pa_sprintf_malloc("%s", s_device_api_str);
107 dump_path_surfix = pa_sprintf_malloc("%s", s->name);
110 s->dump_path = pa_sprintf_malloc("%s_%s_pa-sink%d-%s_%dch_%d.raw", PA_PCM_DUMP_PATH_PREFIX, pa_strempty(dump_time),
111 s->index, pa_strempty(dump_path_surfix), s->sample_spec.channels, s->sample_spec.rate);
113 s->pcm_dump_fp = fopen(s->dump_path, "w");
115 pa_log_warn("%s open failed", s->dump_path);
117 pa_log_info("%s opened", s->dump_path);
/* NOTE(review): dump_time appears to be freed on a line not visible in this
 * chunk — verify it is pa_xfree()d, otherwise it leaks on every open. */
120 pa_xfree(dump_path_surfix);
121 /* close file for dump pcm when config is changed */
122 } else if (~s->core->pcm_dump & PA_PCM_DUMP_PA_SINK && s->pcm_dump_fp) {
123 fclose(s->pcm_dump_fp);
124 pa_log_info("%s closed", s->dump_path);
125 pa_xfree(s->dump_path);
126 s->pcm_dump_fp = NULL;
/* With an open dump file, append the raw bytes of this chunk. */
130 if (s->pcm_dump_fp) {
133 ptr = pa_memblock_acquire(chunk->memblock);
135 fwrite((uint8_t *)ptr + chunk->index, 1, chunk->length, s->pcm_dump_fp);
137 pa_log_warn("pa_memblock_acquire is failed. ptr is NULL");
139 pa_memblock_release(chunk->memblock);
/* Initialize a pa_sink_new_data structure in place: fresh proplist and an
 * empty name→port hashmap (ports are unreffed when the map is freed).
 * Returns the same pointer for call chaining. */
144 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
148 data->proplist = pa_proplist_new();
149 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
/* Set (replace) the proposed sink name; frees any previously set name and
 * stores a private copy of the argument. */
154 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
157 pa_xfree(data->name);
158 data->name = pa_xstrdup(name);
/* Set the proposed sample spec. Passing NULL clears the "is set" flag so the
 * copy is skipped; note the deliberate assignment inside the condition. */
161 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
164 if ((data->sample_spec_is_set = !!spec))
165 data->sample_spec = *spec;
/* Set the proposed channel map; NULL clears the "is set" flag (same pattern
 * as pa_sink_new_data_set_sample_spec above). */
168 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
171 if ((data->channel_map_is_set = !!map))
172 data->channel_map = *map;
/* Set the alternate sample rate the sink may switch to; always marks the
 * value as explicitly set (no NULL/unset path for a plain uint32_t). */
175 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
178 data->alternate_sample_rate_is_set = true;
179 data->alternate_sample_rate = alternate_sample_rate;
/* Set the initial volume; NULL clears the "is set" flag, in which case
 * pa_sink_new() resets the volume to norm (see below). */
182 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
185 if ((data->volume_is_set = !!volume))
186 data->volume = *volume;
/* Set the initial mute state and mark it as explicitly provided.
 * NOTE(review): the line storing `mute` into data->muted is not visible in
 * this chunk — confirm it exists in the full source. */
189 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
192 data->muted_is_set = true;
/* Set (replace) the name of the port to activate initially; frees any
 * previous value and stores a private copy. */
196 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
199 pa_xfree(data->active_port);
200 data->active_port = pa_xstrdup(port);
/* Release everything owned by a pa_sink_new_data: proplist, port map (which
 * unrefs each port via the free callback installed in _init), and the name
 * and active-port strings. The struct itself is caller-owned. */
203 void pa_sink_new_data_done(pa_sink_new_data *data) {
206 pa_proplist_free(data->proplist);
209 pa_hashmap_free(data->ports);
211 pa_xfree(data->name);
212 pa_xfree(data->active_port);
215 /* Called from main context */
/* Clear every implementor-provided callback on the sink, returning it to a
 * pristine state (used during construction and teardown). Only the callbacks
 * visible in this chunk are listed; more may be reset on lines not shown. */
216 static void reset_callbacks(pa_sink *s) {
220 s->get_volume = NULL;
221 s->set_volume = NULL;
222 s->write_volume = NULL;
225 s->request_rewind = NULL;
226 s->update_requested_latency = NULL;
228 s->get_formats = NULL;
229 s->set_formats = NULL;
230 s->reconfigure = NULL;
233 /* Called from main context */
/* Create a new sink object from the (validated) pa_sink_new_data description.
 *
 * Flow: register the name, fire the SINK_NEW hook (which may veto or modify
 * the data), validate sample spec / channel map / volume, fire SINK_FIXATE,
 * then populate the pa_sink fields, its thread_info mirror, and finally
 * create the companion monitor source ("<name>.monitor").
 *
 * Returns the new sink, or NULL on failure (name collision, hook veto, or
 * invalid data). NOTE(review): as the FIXME below says, some failure paths
 * leak `s` — they unregister the name but the freeing of `s` is not visible
 * in this chunk. */
234 pa_sink* pa_sink_new(
236 pa_sink_new_data *data,
237 pa_sink_flags_t flags) {
241 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
242 pa_source_new_data source_data;
248 pa_assert(data->name);
249 pa_assert_ctl_context();
251 s = pa_msgobject_new(pa_sink);
/* Reserve the sink name in the global name registry; fails on collision
 * unless data->namereg_fail allows renaming. */
253 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
254 pa_log_debug("Failed to register name %s.", data->name);
259 pa_sink_new_data_set_name(data, name);
/* Modules get a chance to veto or adjust the new sink here. */
261 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
263 pa_namereg_unregister(core, name);
267 /* FIXME, need to free s here on failure */
/* Validate the (possibly hook-modified) construction data. */
269 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
270 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
272 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
274 if (!data->channel_map_is_set)
275 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
277 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
278 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
280 /* FIXME: There should probably be a general function for checking whether
281 * the sink volume is allowed to be set, like there is for sink inputs. */
282 pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
284 if (!data->volume_is_set) {
285 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
286 data->save_volume = false;
289 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
290 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
292 if (!data->muted_is_set)
/* Inherit card properties and fill in standard device description/icon/role
 * properties before fixation. */
296 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
298 pa_device_init_description(data->proplist, data->card);
299 pa_device_init_icon(data->proplist, true);
300 pa_device_init_intended_roles(data->proplist);
302 if (!data->active_port) {
303 pa_device_port *p = pa_device_port_find_best(data->ports);
305 pa_sink_new_data_set_port(data, p->name);
308 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
310 pa_namereg_unregister(core, name);
/* From here on the data is final: copy it into the sink object. */
314 s->parent.parent.free = sink_free;
315 s->parent.process_msg = pa_sink_process_msg;
318 s->state = PA_SINK_INIT;
321 s->suspend_cause = data->suspend_cause;
322 pa_sink_set_mixer_dirty(s, false);
323 s->name = pa_xstrdup(name);
324 s->proplist = pa_proplist_copy(data->proplist);
325 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
326 s->module = data->module;
327 s->card = data->card;
329 s->priority = pa_device_init_priority(s->proplist);
331 s->sample_spec = data->sample_spec;
332 s->channel_map = data->channel_map;
333 s->default_sample_rate = s->sample_spec.rate;
335 if (data->alternate_sample_rate_is_set)
336 s->alternate_sample_rate = data->alternate_sample_rate;
338 s->alternate_sample_rate = s->core->alternate_sample_rate;
/* An alternate rate equal to the default is useless; disable it. */
340 if (s->sample_spec.rate == s->alternate_sample_rate) {
341 pa_log_warn("Default and alternate sample rates are the same.");
342 s->alternate_sample_rate = 0;
345 s->inputs = pa_idxset_new(NULL, NULL);
347 s->input_to_master = NULL;
/* Volume state: reference == user-visible, real == what reaches the device,
 * soft == applied in software on top of hardware volume. */
349 s->reference_volume = s->real_volume = data->volume;
350 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
351 s->base_volume = PA_VOLUME_NORM;
352 s->n_volume_steps = PA_VOLUME_NORM+1;
353 s->muted = data->muted;
354 s->refresh_volume = s->refresh_muted = false;
361 /* As a minor optimization we just steal the list instead of
363 s->ports = data->ports;
366 s->active_port = NULL;
367 s->save_port = false;
369 if (data->active_port)
370 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
371 s->save_port = data->save_port;
373 /* Hopefully the active port has already been assigned in the previous call
374 to pa_device_port_find_best, but better safe than sorry */
376 s->active_port = pa_device_port_find_best(s->ports);
379 s->port_latency_offset = s->active_port->latency_offset;
381 s->port_latency_offset = 0;
383 s->save_volume = data->save_volume;
384 s->save_muted = data->save_muted;
385 #ifdef TIZEN_PCM_DUMP
386 s->pcm_dump_fp = NULL;
390 pa_silence_memchunk_get(
391 &core->silence_cache,
/* Seed the IO-thread-side mirror of the control-thread state. */
397 s->thread_info.rtpoll = NULL;
398 s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
399 (pa_free_cb_t) pa_sink_input_unref);
400 s->thread_info.soft_volume = s->soft_volume;
401 s->thread_info.soft_muted = s->muted;
402 s->thread_info.state = s->state;
403 s->thread_info.rewind_nbytes = 0;
404 s->thread_info.rewind_requested = false;
405 s->thread_info.max_rewind = 0;
406 s->thread_info.max_request = 0;
407 s->thread_info.requested_latency_valid = false;
408 s->thread_info.requested_latency = 0;
409 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
410 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
411 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
413 PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
414 s->thread_info.volume_changes_tail = NULL;
415 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
416 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
417 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
418 s->thread_info.port_latency_offset = s->port_latency_offset;
420 /* FIXME: This should probably be moved to pa_sink_put() */
421 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
424 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
426 pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
427 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
430 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
431 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Create the companion monitor source that exposes the sink's rendered
 * stream for recording ("<sinkname>.monitor"). */
435 pa_source_new_data_init(&source_data);
436 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
437 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
438 pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
439 source_data.name = pa_sprintf_malloc("%s.monitor", name);
440 source_data.driver = data->driver;
441 source_data.module = data->module;
442 source_data.card = data->card;
444 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
445 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
446 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
/* The monitor inherits only the latency-related flags from the sink. */
448 s->monitor_source = pa_source_new(core, &source_data,
449 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
450 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
452 pa_source_new_data_done(&source_data);
454 if (!s->monitor_source) {
460 s->monitor_source->monitor_of = s;
462 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
463 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
464 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
469 /* Called from main context */
/* Transition the sink to a new state and/or suspend cause.
 *
 * Calls the implementor's set_state() callback (control thread) and then
 * sends SET_STATE to the IO thread; either may fail only while resuming.
 * Afterwards fires the STATE_CHANGED hook, posts a subscription event (unless
 * entering UNLINKED), notifies sink inputs of suspend/resume, and keeps the
 * monitor source's suspend state in sync. Returns 0 on success, negative on
 * failure (exact return paths partly on lines not visible in this chunk). */
470 static int sink_set_state(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) {
473 bool suspend_cause_changed;
478 pa_assert_ctl_context();
480 state_changed = state != s->state;
481 suspend_cause_changed = suspend_cause != s->suspend_cause;
483 if (!state_changed && !suspend_cause_changed)
486 suspending = PA_SINK_IS_OPENED(s->state) && state == PA_SINK_SUSPENDED;
487 resuming = s->state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state);
489 /* If we are resuming, suspend_cause must be 0. */
490 pa_assert(!resuming || !suspend_cause);
492 /* Here's something to think about: what to do with the suspend cause if
493 * resuming the sink fails? The old suspend cause will be incorrect, so we
494 * can't use that. On the other hand, if we set no suspend cause (as is the
495 * case currently), then it looks strange to have a sink suspended without
496 * any cause. It might be a good idea to add a new "resume failed" suspend
497 * cause, or it might just add unnecessary complexity, given that the
498 * current approach of not setting any suspend cause works well enough. */
500 if (s->set_state && state_changed) {
501 ret = s->set_state(s, state);
502 /* set_state() is allowed to fail only when resuming. */
503 pa_assert(ret >= 0 || resuming);
/* Mirror the state change into the IO thread synchronously. */
506 if (ret >= 0 && s->asyncmsgq && state_changed)
507 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
508 /* SET_STATE is allowed to fail only when resuming. */
/* Roll the implementor callback back to SUSPENDED if the IO thread
 * refused the resume. */
512 s->set_state(s, PA_SINK_SUSPENDED);
515 #ifdef TIZEN_PCM_DUMP
516 /* close file for dump pcm */
517 if (s->pcm_dump_fp && (s->core->pcm_dump_option & PA_PCM_DUMP_OPTION_SEPARATED) && suspending) {
518 fclose(s->pcm_dump_fp);
519 pa_log_info("%s closed", s->dump_path);
520 pa_xfree(s->dump_path);
521 s->pcm_dump_fp = NULL;
524 if (suspend_cause_changed) {
525 char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
526 char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
528 pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(s->suspend_cause, old_cause_buf),
529 pa_suspend_cause_to_string(suspend_cause, new_cause_buf));
530 s->suspend_cause = suspend_cause;
537 pa_log_debug("%s: state: %s -> %s", s->name, pa_sink_state_to_string(s->state), pa_sink_state_to_string(state));
540 /* If we enter UNLINKED state, then we don't send change notifications.
541 * pa_sink_unlink() will send unlink notifications instead. */
542 if (state != PA_SINK_UNLINKED) {
543 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
544 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
548 if (suspending || resuming) {
552 /* We're suspending or resuming, tell everyone about it */
554 PA_IDXSET_FOREACH(i, s->inputs, idx)
555 if (s->state == PA_SINK_SUSPENDED &&
556 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
557 pa_sink_input_kill(i);
/* Inputs with a suspend callback are told about the new state. */
559 i->suspend(i, state == PA_SINK_SUSPENDED);
563 if ((suspending || resuming || suspend_cause_changed) && s->monitor_source)
564 pa_source_sync_suspend(s->monitor_source);
/* Install the implementor callback used to read hardware volume; body not
 * visible in this chunk. */
569 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Install (or clear) the implementor callback that applies hardware volume.
 * Toggles PA_SINK_HW_VOLUME_CTRL accordingly, keeps decibel-volume support in
 * sync, and posts a change event if the flags changed after init. */
575 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
576 pa_sink_flags_t flags;
/* write_volume (deferred volume) requires set_volume; don't allow clearing
 * set_volume while write_volume is still installed. */
579 pa_assert(!s->write_volume || cb);
583 /* Save the current flags so we can tell if they've changed */
587 /* The sink implementor is responsible for setting decibel volume support */
588 s->flags |= PA_SINK_HW_VOLUME_CTRL;
590 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
591 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
592 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
595 /* If the flags have changed after init, let any clients know via a change event */
596 if (s->state != PA_SINK_INIT && flags != s->flags)
597 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install (or clear) the deferred-volume write callback (applies the volume
 * from the IO thread at a precise time). Toggles PA_SINK_DEFERRED_VOLUME and
 * posts a change event if the flags changed after init. */
600 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
601 pa_sink_flags_t flags;
/* Deferred volume only makes sense with a hardware set_volume callback. */
604 pa_assert(!cb || s->set_volume);
606 s->write_volume = cb;
608 /* Save the current flags so we can tell if they've changed */
612 s->flags |= PA_SINK_DEFERRED_VOLUME;
614 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
616 /* If the flags have changed after init, let any clients know via a change event */
617 if (s->state != PA_SINK_INIT && flags != s->flags)
618 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install the implementor callback used to read hardware mute state; body not
 * visible in this chunk. */
621 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
/* Install (or clear) the implementor callback that applies hardware mute.
 * Toggles PA_SINK_HW_MUTE_CTRL and posts a change event if the flags changed
 * after init — same pattern as the volume callback setters above. */
627 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
628 pa_sink_flags_t flags;
634 /* Save the current flags so we can tell if they've changed */
638 s->flags |= PA_SINK_HW_MUTE_CTRL;
640 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
642 /* If the flags have changed after init, let any clients know via a change event */
643 if (s->state != PA_SINK_INIT && flags != s->flags)
644 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable/disable PA_SINK_FLAT_VOLUME on this sink. The request is gated by
 * the daemon-wide flat_volumes preference; a change event is posted if the
 * flags actually changed after init. */
647 static void enable_flat_volume(pa_sink *s, bool enable) {
648 pa_sink_flags_t flags;
652 /* Always follow the overall user preference here */
653 enable = enable && s->core->flat_volumes;
655 /* Save the current flags so we can tell if they've changed */
659 s->flags |= PA_SINK_FLAT_VOLUME;
661 s->flags &= ~PA_SINK_FLAT_VOLUME;
663 /* If the flags have changed after init, let any clients know via a change event */
664 if (s->state != PA_SINK_INIT && flags != s->flags)
665 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable/disable PA_SINK_DECIBEL_VOLUME. Flat volume follows decibel volume:
 * it is only meaningful when volumes are in dB, so it is toggled in lockstep.
 * Posts a change event if the flags changed after init. */
668 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
669 pa_sink_flags_t flags;
673 /* Save the current flags so we can tell if they've changed */
677 s->flags |= PA_SINK_DECIBEL_VOLUME;
678 enable_flat_volume(s, true);
680 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
681 enable_flat_volume(s, false);
684 /* If the flags have changed after init, let any clients know via a change event */
685 if (s->state != PA_SINK_INIT && flags != s->flags)
686 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
689 /* Called from main context */
/* Finish construction of a sink created with pa_sink_new(): validate the
 * flag/callback invariants the implementor must have established, settle the
 * volume state (including volume sharing with a master sink for filter
 * sinks), move the sink from INIT to IDLE or SUSPENDED, publish the monitor
 * source, post the NEW subscription event, fire the SINK_PUT hook and update
 * the default sink. After this call the sink is fully linked and visible. */
690 void pa_sink_put(pa_sink* s) {
691 pa_sink_assert_ref(s);
692 pa_assert_ctl_context();
694 pa_assert(s->state == PA_SINK_INIT);
695 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
697 /* The following fields must be initialized properly when calling _put() */
698 pa_assert(s->asyncmsgq);
699 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
701 /* Generally, flags should be initialized via pa_sink_new(). As a
702 * special exception we allow some volume related flags to be set
703 * between _new() and _put() by the callback setter functions above.
705 * Thus we implement a couple safeguards here which ensure the above
706 * setters were used (or at least the implementor made manual changes
707 * in a compatible way).
709 * Note: All of these flags set here can change over the life time
711 pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
712 pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
713 pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
715 /* XXX: Currently decibel volume is disabled for all sinks that use volume
716 * sharing. When the master sink supports decibel volume, it would be good
717 * to have the flag also in the filter sink, but currently we don't do that
718 * so that the flags of the filter sink never change when it's moved from
719 * a master sink to another. One solution for this problem would be to
720 * remove user-visible volume altogether from filter sinks when volume
721 * sharing is used, but the current approach was easier to implement... */
722 /* We always support decibel volumes in software, otherwise we leave it to
723 * the sink implementor to set this flag as needed.
725 * Note: This flag can also change over the life time of the sink. */
726 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
727 pa_sink_enable_decibel_volume(s, true);
728 s->soft_volume = s->reference_volume;
731 /* If the sink implementor support DB volumes by itself, we should always
732 * try and enable flat volumes too */
733 if ((s->flags & PA_SINK_DECIBEL_VOLUME))
734 enable_flat_volume(s, true);
736 if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
/* Filter sink sharing volume with its master: adopt the master's
 * reference/real volume, remapped onto this sink's channel map. */
737 pa_sink *root_sink = pa_sink_get_master(s);
739 pa_assert(root_sink);
741 s->reference_volume = root_sink->reference_volume;
742 pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
744 s->real_volume = root_sink->real_volume;
745 pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
747 /* We assume that if the sink implementor changed the default
748 * volume he did so in real_volume, because that is the usual
749 * place where he is supposed to place his changes. */
750 s->reference_volume = s->real_volume;
/* Re-seed the IO-thread mirrors now that the final volumes are known. */
752 s->thread_info.soft_volume = s->soft_volume;
753 s->thread_info.soft_muted = s->muted;
754 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
/* Invariant checks: base volume/steps only make sense without HW volume
 * control, and the monitor source must agree with us on latency flags. */
756 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
757 || (s->base_volume == PA_VOLUME_NORM
758 && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
759 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
760 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
761 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
762 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
764 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
765 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
766 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
/* Start out SUSPENDED if a suspend cause was given, IDLE otherwise. */
768 if (s->suspend_cause)
769 pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED, s->suspend_cause) == 0);
771 pa_assert_se(sink_set_state(s, PA_SINK_IDLE, 0) == 0);
773 pa_source_put(s->monitor_source);
775 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
776 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
778 /* This function must be called after the PA_CORE_HOOK_SINK_PUT hook,
779 * because module-switch-on-connect needs to know the old default sink */
780 pa_core_update_default_sink(s->core);
783 /* Called from main context */
/* Detach the sink from the core: fire the UNLINK hook, unregister the name,
 * remove it from the core/card sets, update the default sink, kill all
 * remaining inputs, move to the UNLINKED state, unlink the monitor source and
 * finally post REMOVE + fire UNLINK_POST. Idempotent via unlink_requested. */
784 void pa_sink_unlink(pa_sink* s) {
786 pa_sink_input *i, PA_UNUSED *j = NULL;
788 pa_sink_assert_ref(s);
789 pa_assert_ctl_context();
791 /* Please note that pa_sink_unlink() does more than simply
792 * reversing pa_sink_put(). It also undoes the registrations
793 * already done in pa_sink_new()! */
795 if (s->unlink_requested)
798 s->unlink_requested = true;
800 linked = PA_SINK_IS_LINKED(s->state);
803 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
805 if (s->state != PA_SINK_UNLINKED)
806 pa_namereg_unregister(s->core, s->name);
807 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
809 pa_core_update_default_sink(s->core);
812 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill every input still connected; each kill removes it from s->inputs. */
814 while ((i = pa_idxset_first(s->inputs, NULL))) {
816 pa_sink_input_kill(i);
821 sink_set_state(s, PA_SINK_UNLINKED, 0);
/* Fallback for the not-linked case: set the state field directly. */
823 s->state = PA_SINK_UNLINKED;
827 if (s->monitor_source)
828 pa_source_unlink(s->monitor_source);
831 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
832 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
836 /* Called from main context */
/* Destructor invoked when the sink's refcount drops to zero (installed as
 * parent.parent.free in pa_sink_new). Requires the sink to be unlinked.
 * Releases the monitor source, input containers, silence memblock, proplist,
 * port map and (Tizen) any open PCM dump file. */
837 static void sink_free(pa_object *o) {
838 pa_sink *s = PA_SINK(o);
841 pa_assert_ctl_context();
842 pa_assert(pa_sink_refcnt(s) == 0);
843 pa_assert(!PA_SINK_IS_LINKED(s->state));
845 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
/* Drop any still-queued deferred hardware volume changes. */
847 pa_sink_volume_change_flush(s);
849 if (s->monitor_source) {
850 pa_source_unref(s->monitor_source);
851 s->monitor_source = NULL;
/* Both containers must already be empty of live inputs at this point. */
854 pa_idxset_free(s->inputs, NULL);
855 pa_hashmap_free(s->thread_info.inputs);
857 if (s->silence.memblock)
858 pa_memblock_unref(s->silence.memblock);
864 pa_proplist_free(s->proplist);
867 pa_hashmap_free(s->ports);
869 #ifdef TIZEN_PCM_DUMP
870 /* close file for dump pcm */
871 if (s->pcm_dump_fp) {
872 fclose(s->pcm_dump_fp);
873 pa_log_info("%s closed", s->dump_path);
874 pa_xfree(s->dump_path);
875 s->pcm_dump_fp = NULL;
881 /* Called from main context, and not while the IO thread is active, please */
/* Set the message queue used to talk to the IO thread, propagating the same
 * queue to the monitor source. */
882 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
883 pa_sink_assert_ref(s);
884 pa_assert_ctl_context();
888 if (s->monitor_source)
889 pa_source_set_asyncmsgq(s->monitor_source, q);
892 /* Called from main context, and not while the IO thread is active, please */
/* Update the bits of s->flags selected by `mask` to the values in `value`.
 * Only the LATENCY and DYNAMIC_LATENCY flags may be changed this way. Posts
 * a change event, fires FLAGS_CHANGED, mirrors the change onto the monitor
 * source, and propagates recursively to the origin sinks of any filter
 * inputs connected to this sink. */
893 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
894 pa_sink_flags_t old_flags;
895 pa_sink_input *input;
898 pa_sink_assert_ref(s);
899 pa_assert_ctl_context();
901 /* For now, allow only a minimal set of flags to be changed. */
902 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
904 old_flags = s->flags;
905 s->flags = (s->flags & ~mask) | (value & mask);
907 if (s->flags == old_flags)
910 if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
911 pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
913 if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
914 pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
915 s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
917 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
918 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
920 if (s->monitor_source)
921 pa_source_update_flags(s->monitor_source,
922 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
923 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
924 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
925 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
/* Filter sinks stacked on top of this one must track the same flags. */
927 PA_IDXSET_FOREACH(input, s->inputs, idx) {
928 if (input->origin_sink)
929 pa_sink_update_flags(input->origin_sink, mask, value);
933 /* Called from IO context, or before _put() from main context */
/* Attach the realtime poll object used by the IO thread; shared with the
 * monitor source. */
934 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
935 pa_sink_assert_ref(s);
936 pa_sink_assert_io_context(s);
938 s->thread_info.rtpoll = p;
940 if (s->monitor_source)
941 pa_source_set_rtpoll(s->monitor_source, p);
944 /* Called from main context */
/* Recompute RUNNING vs IDLE from the current number of users; a SUSPENDED
 * sink is left alone. Returns the sink_set_state() result (0 on success). */
945 int pa_sink_update_status(pa_sink*s) {
946 pa_sink_assert_ref(s);
947 pa_assert_ctl_context();
948 pa_assert(PA_SINK_IS_LINKED(s->state));
950 if (s->state == PA_SINK_SUSPENDED)
953 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
956 /* Called from any context - must be threadsafe */
/* Atomically mark the hardware mixer as needing a resync (checked in
 * pa_sink_suspend when the mixer becomes accessible again). */
957 void pa_sink_set_mixer_dirty(pa_sink *s, bool is_dirty) {
958 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
961 /* Called from main context */
/* Add (suspend=true) or remove (suspend=false) `cause` from the sink's
 * suspend cause set and transition state accordingly: SUSPENDED while any
 * cause remains, otherwise RUNNING/IDLE based on usage. When the last
 * session-suspend is lifted and the mixer was flagged dirty, the active port
 * is re-applied so the hardware mixer picks up settings changed while it was
 * inaccessible. Returns the sink_set_state() result. */
962 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
963 pa_suspend_cause_t merged_cause;
965 pa_sink_assert_ref(s);
966 pa_assert_ctl_context();
967 pa_assert(PA_SINK_IS_LINKED(s->state));
968 pa_assert(cause != 0);
971 merged_cause = s->suspend_cause | cause;
973 merged_cause = s->suspend_cause & ~cause;
975 if (!(merged_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
976 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
977 it'll be handled just fine. */
978 pa_sink_set_mixer_dirty(s, false);
979 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
980 if (s->active_port && s->set_port) {
981 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
/* With deferred volume the port change must run in the IO thread. */
982 struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
983 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
986 s->set_port(s, s->active_port);
997 return sink_set_state(s, PA_SINK_SUSPENDED, merged_cause);
999 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
1002 /* Called from main context */
/* Begin moving every input away from this sink: each input that accepts
 * start_move is pushed (with a reference held) onto queue `q` (created if
 * NULL — creation line not visible here). The caller later completes the
 * move with pa_sink_move_all_finish() or aborts with pa_sink_move_all_fail().
 * Iteration caches the next element so removal during start_move is safe. */
1003 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
1004 pa_sink_input *i, *n;
1007 pa_sink_assert_ref(s);
1008 pa_assert_ctl_context();
1009 pa_assert(PA_SINK_IS_LINKED(s->state));
1014 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
1015 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
1017 pa_sink_input_ref(i);
1019 if (pa_sink_input_start_move(i) >= 0)
1020 pa_queue_push(q, i);
1022 pa_sink_input_unref(i);
1028 /* Called from main context */
/* Complete a move started by pa_sink_move_all_start(): reattach each queued
 * input to sink `s` (failing individually via fail_move on error), drop the
 * reference taken at start, and free the queue. `save` is forwarded to
 * finish_move to control whether the routing is persisted. */
1029 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
1032 pa_sink_assert_ref(s);
1033 pa_assert_ctl_context();
1034 pa_assert(PA_SINK_IS_LINKED(s->state));
1037 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
/* Inputs killed while parked are skipped — only still-linked ones move. */
1038 if (PA_SINK_INPUT_IS_LINKED(i->state)) {
1039 if (pa_sink_input_finish_move(i, s, save) < 0)
1040 pa_sink_input_fail_move(i);
1043 pa_sink_input_unref(i);
1046 pa_queue_free(q, NULL);
1049 /* Called from main context */
/* Abort a move started by pa_sink_move_all_start(): every queued input gets
 * fail_move (typically killing it), its reference is dropped, and the queue
 * is freed. */
1050 void pa_sink_move_all_fail(pa_queue *q) {
1053 pa_assert_ctl_context();
1056 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1057 pa_sink_input_fail_move(i);
1058 pa_sink_input_unref(i);
1061 pa_queue_free(q, NULL);
1064 /* Called from IO thread context */
/* Given how many bytes are still left to play in the sink buffer, determine
 * the longest outstanding input underrun (recursing through filter sinks,
 * with byte counts converted between each sink's sample spec) and notify
 * inputs whose data has been fully consumed. Returns `left_to_play` reduced
 * by the longest underrun found, i.e. the effective time before a real
 * device underrun. */
1065 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
1070 pa_sink_assert_ref(s);
1071 pa_sink_assert_io_context(s);
1073 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1074 size_t uf = i->thread_info.underrun_for_sink;
1076 /* Propagate down the filter tree */
1077 if (i->origin_sink) {
1078 size_t filter_result, left_to_play_origin;
1080 /* The recursive call works in the origin sink domain ... */
1081 left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
1083 /* .. and returns the time to sleep before waking up. We need the
1084 * underrun duration for comparisons, so we undo the subtraction on
1085 * the return value... */
1086 filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
1088 /* ... and convert it back to the master sink domain */
1089 filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
1091 /* Remember the longest underrun so far */
1092 if (filter_result > result)
1093 result = filter_result;
1097 /* No underrun here, move on */
1099 } else if (uf >= left_to_play) {
1100 /* The sink has possibly consumed all the data the sink input provided */
1101 pa_sink_input_process_underrun(i);
1102 } else if (uf > result) {
1103 /* Remember the longest underrun so far */
1109 pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
1110 (long) result, (long) left_to_play - result);
1111 return left_to_play - result;
1114 /* Called from IO thread context */
/* Executes a rewind of 'nbytes' bytes: resets the pending rewind request
 * state, rewinds any deferred (HW-synchronized) volume changes, then
 * forwards the rewind to every attached sink input and, finally, to the
 * monitor source so capture stays consistent with playback. */
1115 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1119 pa_sink_assert_ref(s);
1120 pa_sink_assert_io_context(s);
1121 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1123 /* If nobody requested this and this is actually no real rewind
1124 * then we can short cut this. Please note that this means that
1125 * not all rewind requests triggered upstream will always be
1126 * translated in actual requests! */
1127 if (!s->thread_info.rewind_requested && nbytes <= 0)
1130 s->thread_info.rewind_nbytes = 0;
1131 s->thread_info.rewind_requested = false;
1134 pa_log_debug("Processing rewind...");
/* Deferred-volume sinks keep a timeline of pending volume changes that
 * must be rolled back by the rewound amount. */
1135 if (s->flags & PA_SINK_DEFERRED_VOLUME)
1136 pa_sink_volume_change_rewind(s, nbytes);
1137 #ifdef TIZEN_PCM_DUMP
/* Tizen-only debug dump: step the dump file position back by nbytes. */
1140 fseeko(s->pcm_dump_fp, (off_t)nbytes * (-1), SEEK_CUR);
1144 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1145 pa_sink_input_assert_ref(i);
1146 pa_sink_input_process_rewind(i, nbytes);
1150 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1151 pa_source_process_rewind(s->monitor_source, nbytes);
1155 /* Called from IO thread context */
/* Peeks up to '*length' bytes from every attached sink input into the
 * 'info' array (at most 'maxinfo' entries). Pure-silence chunks are
 * dropped immediately and do not occupy an entry. On return, '*length'
 * is clamped to the shortest chunk gathered, so all entries can be mixed
 * over the same span. Each filled entry holds a reference to its sink
 * input (in userdata) and to its memblock; inputs_drop() releases them.
 * Returns the number of entries filled. */
1156 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1160 size_t mixlength = *length;
1162 pa_sink_assert_ref(s);
1163 pa_sink_assert_io_context(s);
1166 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1167 pa_sink_input_assert_ref(i);
1169 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
/* Track the shortest chunk seen; that bounds the mixable length. */
1171 if (mixlength == 0 || info->chunk.length < mixlength)
1172 mixlength = info->chunk.length;
1174 if (pa_memblock_is_silence(info->chunk.memblock)) {
1175 pa_memblock_unref(info->chunk.memblock);
1179 info->userdata = pa_sink_input_ref(i);
1181 pa_assert(info->chunk.memblock);
1182 pa_assert(info->chunk.length > 0);
1190 *length = mixlength;
1195 /* Called from IO thread context */
/* After mixing: advances every attached sink input by result->length bytes,
 * posts the per-input audio to any direct outputs on the monitor source
 * (applying the input's own volume where a mix entry exists), releases the
 * references held by the 'info' array (including entries whose input has
 * since disappeared), and finally posts the mixed result to the monitor
 * source. */
1196 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1200 unsigned n_unreffed = 0;
1202 pa_sink_assert_ref(s);
1203 pa_sink_assert_io_context(s);
1205 pa_assert(result->memblock);
1206 pa_assert(result->length > 0);
1208 /* We optimize for the case where the order of the inputs has not changed */
1210 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1212 pa_mix_info* m = NULL;
1214 pa_sink_input_assert_ref(i);
1216 /* Let's try to find the matching entry in the pa_mix_info array */
1217 for (j = 0; j < n; j ++) {
1219 if (info[p].userdata == i) {
1229 /* Drop read data */
1230 pa_sink_input_drop(i, result->length);
1232 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
1234 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1235 void *ostate = NULL;
1236 pa_source_output *o;
/* If this input contributed a mix chunk, monitor it with the
 * input's own volume applied; otherwise fall through to the
 * unscaled chunk (set up on elided lines). */
1239 if (m && m->chunk.memblock) {
1241 pa_memblock_ref(c.memblock);
1242 pa_assert(result->length <= c.length);
1243 c.length = result->length;
1245 pa_memchunk_make_writable(&c, 0);
1246 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
1249 pa_memblock_ref(c.memblock);
1250 pa_assert(result->length <= c.length);
1251 c.length = result->length;
1254 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1255 pa_source_output_assert_ref(o);
1256 pa_assert(o->direct_on_input == i);
1257 pa_source_post_direct(s->monitor_source, o, &c);
1260 pa_memblock_unref(c.memblock);
/* Release this entry's memblock and sink-input references now that the
 * input has been advanced and monitored. */
1265 if (m->chunk.memblock) {
1266 pa_memblock_unref(m->chunk.memblock);
1267 pa_memchunk_reset(&m->chunk);
1270 pa_sink_input_unref(m->userdata);
1277 /* Now drop references to entries that are included in the
1278 * pa_mix_info array but don't exist anymore */
1280 if (n_unreffed < n) {
1281 for (; n > 0; info++, n--) {
1283 pa_sink_input_unref(info->userdata);
1284 if (info->chunk.memblock)
1285 pa_memblock_unref(info->chunk.memblock);
1289 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1290 pa_source_post(s->monitor_source, result);
1293 /* Called from IO thread context */
/* Renders up to 'length' bytes of mixed audio into '*result', allocating
 * or referencing a memblock as needed. Fast paths: a suspended sink
 * returns cached silence; zero inputs return the sink's silence block;
 * exactly one input returns that input's chunk with soft volume applied
 * (copy-on-write only when the volume is not 0 dB); otherwise pa_mix()
 * combines all gathered chunks into a freshly allocated block. Inputs are
 * advanced and the monitor source fed via inputs_drop(). Must not be
 * called while a rewind is pending. */
1294 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1295 pa_mix_info info[MAX_MIX_CHANNELS];
1297 size_t block_size_max;
1299 pa_sink_assert_ref(s);
1300 pa_sink_assert_io_context(s);
1301 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1302 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1305 pa_assert(!s->thread_info.rewind_requested);
1306 pa_assert(s->thread_info.rewind_nbytes == 0);
1308 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1309 result->memblock = pa_memblock_ref(s->silence.memblock);
1310 result->index = s->silence.index;
1311 result->length = PA_MIN(s->silence.length, length);
/* A zero 'length' request falls back to one frame-aligned page. */
1318 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1320 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1321 if (length > block_size_max)
1322 length = pa_frame_align(block_size_max, &s->sample_spec);
1324 pa_assert(length > 0);
1326 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: nothing to mix, hand out silence. */
1330 *result = s->silence;
1331 pa_memblock_ref(result->memblock);
1333 if (result->length > length)
1334 result->length = length;
1336 } else if (n == 1) {
/* Single input: reuse its chunk directly and apply soft volume. */
1339 *result = info[0].chunk;
1340 pa_memblock_ref(result->memblock);
1342 if (result->length > length)
1343 result->length = length;
1345 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1347 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1348 pa_memblock_unref(result->memblock);
1349 pa_silence_memchunk_get(&s->core->silence_cache,
1354 } else if (!pa_cvolume_is_norm(&volume)) {
1355 pa_memchunk_make_writable(result, 0);
1356 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* General case: mix all n chunks into a new block. */
1360 result->memblock = pa_memblock_new(s->core->mempool, length);
1362 ptr = pa_memblock_acquire(result->memblock);
1363 result->length = pa_mix(info, n,
1366 &s->thread_info.soft_volume,
1367 s->thread_info.soft_muted);
1368 pa_memblock_release(result->memblock);
1373 inputs_drop(s, info, n, result);
1375 #ifdef TIZEN_PCM_DUMP
1376 pa_sink_write_pcm_dump(s, result);
1381 /* Called from IO thread context */
/* Renders mixed audio directly into the caller-provided chunk 'target'
 * (its memblock must already exist and be writable at target->index).
 * May shorten target->length if less data is available than requested;
 * a suspended sink or zero inputs yields silence. The single-input path
 * copies the input's chunk (volume-scaled if needed) into the target;
 * the multi-input path mixes in place via pa_mix(). Inputs are advanced
 * through inputs_drop(). Must not be called with a rewind pending. */
1382 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1383 pa_mix_info info[MAX_MIX_CHANNELS];
1385 size_t length, block_size_max;
1387 pa_sink_assert_ref(s);
1388 pa_sink_assert_io_context(s);
1389 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1391 pa_assert(target->memblock);
1392 pa_assert(target->length > 0);
1393 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1395 pa_assert(!s->thread_info.rewind_requested);
1396 pa_assert(s->thread_info.rewind_nbytes == 0);
1398 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1399 pa_silence_memchunk(target, &s->sample_spec);
1405 length = target->length;
1406 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1407 if (length > block_size_max)
1408 length = pa_frame_align(block_size_max, &s->sample_spec);
1410 pa_assert(length > 0);
1412 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: no input data, silence the (possibly shortened) target. */
1415 if (target->length > length)
1416 target->length = length;
1418 pa_silence_memchunk(target, &s->sample_spec);
1419 } else if (n == 1) {
1422 if (target->length > length)
1423 target->length = length;
1425 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1427 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1428 pa_silence_memchunk(target, &s->sample_spec);
/* Copy the single input's data, applying volume copy-on-write only
 * when it is not 0 dB. */
1432 vchunk = info[0].chunk;
1433 pa_memblock_ref(vchunk.memblock);
1435 if (vchunk.length > length)
1436 vchunk.length = length;
1438 if (!pa_cvolume_is_norm(&volume)) {
1439 pa_memchunk_make_writable(&vchunk, 0);
1440 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1443 pa_memchunk_memcpy(target, &vchunk);
1444 pa_memblock_unref(vchunk.memblock);
/* General case: mix all n chunks straight into the target block. */
1450 ptr = pa_memblock_acquire(target->memblock);
1452 target->length = pa_mix(info, n,
1453 (uint8_t*) ptr + target->index, length,
1455 &s->thread_info.soft_volume,
1456 s->thread_info.soft_muted);
1458 pa_memblock_release(target->memblock);
1461 inputs_drop(s, info, n, target);
1463 #ifdef TIZEN_PCM_DUMP
1464 pa_sink_write_pcm_dump(s, target);
1469 /* Called from IO thread context */
/* Like pa_sink_render_into(), but guarantees the target chunk is filled
 * completely: if one render pass returns less than requested, it loops
 * (on elided lines) rendering into the remaining tail until target->length
 * bytes have been produced. A suspended sink short-circuits to silence. */
1470 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1474 pa_sink_assert_ref(s);
1475 pa_sink_assert_io_context(s);
1476 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1478 pa_assert(target->memblock);
1479 pa_assert(target->length > 0);
1480 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1482 pa_assert(!s->thread_info.rewind_requested);
1483 pa_assert(s->thread_info.rewind_nbytes == 0);
1485 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1486 pa_silence_memchunk(target, &s->sample_spec);
1499 pa_sink_render_into(s, &chunk);
1508 /* Called from IO thread context */
/* Renders exactly 'length' bytes into '*result'. First does a normal
 * pa_sink_render(); if that produced less than requested, the result
 * block is made writable at the full length and the remaining tail is
 * filled with pa_sink_render_into_full(). Must not be called while a
 * rewind is pending. */
1509 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1510 pa_sink_assert_ref(s);
1511 pa_sink_assert_io_context(s);
1512 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1513 pa_assert(length > 0);
1514 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1517 pa_assert(!s->thread_info.rewind_requested);
1518 pa_assert(s->thread_info.rewind_nbytes == 0);
1522 pa_sink_render(s, length, result);
1524 if (result->length < length) {
/* Grow the block to the full size, then render the missing tail. */
1527 pa_memchunk_make_writable(result, length);
1529 chunk.memblock = result->memblock;
1530 chunk.index = result->index + result->length;
1531 chunk.length = length - result->length;
1533 pa_sink_render_into_full(s, &chunk);
1535 result->length = length;
1541 /* Called from main thread */
/* Tries to reconfigure the sink's sample spec (currently only the rate)
 * to better match 'spec', e.g. for a passthrough stream or to avoid
 * resampling. Bails out when the sink has no reconfigure() callback,
 * when the sink or its monitor is RUNNING, or when the requested spec is
 * invalid. Picks between the default and alternate rates by which one
 * divides evenly with the requested rate (11025- vs 4000-based families).
 * The sink is suspended around the actual reconfiguration and corked
 * inputs get their resamplers updated afterwards. Returns negative on
 * failure (exact return values partly on elided lines). */
1542 int pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
1544 pa_sample_spec desired_spec;
1545 uint32_t default_rate = s->default_sample_rate;
1546 uint32_t alternate_rate = s->alternate_sample_rate;
1549 bool default_rate_is_usable = false;
1550 bool alternate_rate_is_usable = false;
1551 bool avoid_resampling = s->core->avoid_resampling;
1553 /* We currently only try to reconfigure the sample rate */
1555 if (pa_sample_spec_equal(spec, &s->sample_spec))
1558 if (!s->reconfigure)
1561 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1562 pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
1566 if (PA_SINK_IS_RUNNING(s->state)) {
1567 pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
1568 s->sample_spec.rate);
1572 if (s->monitor_source) {
1573 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1574 pa_log_info("Cannot update rate, monitor source is RUNNING");
1579 if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
1582 desired_spec = s->sample_spec;
/* Passthrough must use the stream's exact rate (branch condition on an
 * elided line). */
1585 /* We have to try to use the sink input rate */
1586 desired_spec.rate = spec->rate;
1588 } else if (avoid_resampling && (spec->rate >= default_rate || spec->rate >= alternate_rate)) {
1589 /* We just try to set the sink input's sample rate if it's not too low */
1590 desired_spec.rate = spec->rate;
1592 } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
1593 /* We can directly try to use this rate */
1594 desired_spec.rate = spec->rate;
1597 /* See if we can pick a rate that results in less resampling effort */
1598 if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
1599 default_rate_is_usable = true;
1600 if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
1601 default_rate_is_usable = true;
1602 if (alternate_rate && alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
1603 alternate_rate_is_usable = true;
1604 if (alternate_rate && alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
1605 alternate_rate_is_usable = true;
1607 if (alternate_rate_is_usable && !default_rate_is_usable)
1608 desired_spec.rate = alternate_rate;
1610 desired_spec.rate = default_rate;
1613 if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_sink_is_passthrough(s))
/* Don't disturb other streams: a non-passthrough rate switch is only
 * allowed when nobody is actively using the sink. */
1616 if (!passthrough && pa_sink_used_by(s) > 0)
1619 pa_log_debug("Suspending sink %s due to changing format.", s->name);
1620 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1622 if (s->reconfigure(s, &desired_spec, passthrough) >= 0) {
1623 /* update monitor source as well */
1624 if (s->monitor_source && !passthrough)
1625 pa_source_reconfigure(s->monitor_source, &desired_spec, false);
1626 pa_log_info("Changed format successfully");
1628 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1629 if (i->state == PA_SINK_INPUT_CORKED)
1630 pa_sink_input_update_rate(i);
1636 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1641 /* Called from main thread */
/* Returns the sink's current playback latency in usec, queried from the
 * IO thread via PA_SINK_MESSAGE_GET_LATENCY. Returns 0 (on elided early
 * returns) when the sink is suspended or does not report latency. The
 * signed port latency offset is only added when the sum stays
 * non-negative, since the return type is unsigned. */
1642 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1645 pa_sink_assert_ref(s);
1646 pa_assert_ctl_context();
1647 pa_assert(PA_SINK_IS_LINKED(s->state));
1649 /* The returned value is supposed to be in the time domain of the sound card! */
1651 if (s->state == PA_SINK_SUSPENDED)
1654 if (!(s->flags & PA_SINK_LATENCY))
1657 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1659 /* the return value is unsigned, so check that the offset can be added to usec without
1661 if (-s->port_latency_offset <= usec)
1662 usec += s->port_latency_offset;
1666 return (pa_usec_t)usec;
1669 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): calls process_msg()
 * directly instead of going through the async message queue. Adds the
 * thread-local port latency offset; when 'allow_negative' is false the
 * result is clamped so the caller never sees a negative latency. */
1670 int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
1674 pa_sink_assert_ref(s);
1675 pa_sink_assert_io_context(s);
1676 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1678 /* The returned value is supposed to be in the time domain of the sound card! */
1680 if (s->thread_info.state == PA_SINK_SUSPENDED)
1683 if (!(s->flags & PA_SINK_LATENCY))
1686 o = PA_MSGOBJECT(s);
1688 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1690 o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1692 /* If allow_negative is false, the call should only return positive values, */
1693 usec += s->thread_info.port_latency_offset;
1694 if (!allow_negative && usec < 0)
1700 /* Called from the main thread (and also from the IO thread while the main
1701 * thread is waiting).
1703 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1704 * set. Instead, flat volume mode is detected by checking whether the root sink
1705 * has the flag set. */
/* Returns true if flat volume is in effect for this sink (checked on the
 * root of the volume-sharing tree).
 * NOTE(review): pa_sink_get_master() can return NULL for an orphaned
 * filter sink; the NULL guard appears to be on an elided line — confirm. */
1706 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1707 pa_sink_assert_ref(s);
1709 s = pa_sink_get_master(s);
1712 return (s->flags & PA_SINK_FLAT_VOLUME);
1717 /* Called from the main thread (and also from the IO thread while the main
1718 * thread is waiting). */
/* Walks up the chain of volume-sharing filter sinks and returns the root
 * (master) sink. May return NULL when a filter sink in the chain is not
 * currently connected to a master (input_to_master unset). */
1719 pa_sink *pa_sink_get_master(pa_sink *s) {
1720 pa_sink_assert_ref(s);
1722 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1723 if (PA_UNLIKELY(!s->input_to_master))
1726 s = s->input_to_master->sink;
1732 /* Called from main context */
/* Returns true if this sink is a filter sink, i.e. it feeds its output
 * into another sink through the input_to_master sink input. */
1733 bool pa_sink_is_filter(pa_sink *s) {
1734 pa_sink_assert_ref(s);
1736 return (s->input_to_master != NULL);
1739 /* Called from main context */
/* Returns true if the sink currently carries a passthrough stream. Since
 * at most one passthrough input may be connected at a time, only the
 * single-input case needs to be inspected. */
1740 bool pa_sink_is_passthrough(pa_sink *s) {
1741 pa_sink_input *alt_i;
1744 pa_sink_assert_ref(s);
1746 /* one and only one PASSTHROUGH input can possibly be connected */
1747 if (pa_idxset_size(s->inputs) == 1) {
1748 alt_i = pa_idxset_first(s->inputs, &idx);
1750 if (pa_sink_input_is_passthrough(alt_i))
1757 /* Called from main context */
/* Puts the PA core objects into passthrough mode: suspends the monitor
 * source (monitoring compressed data makes no sense), saves the current
 * volume/save flag for later restoration by pa_sink_leave_passthrough(),
 * and forces the volume to 0 dB gain (capped at base_volume) so the
 * encoded bitstream is not scaled. */
1758 void pa_sink_enter_passthrough(pa_sink *s) {
1761 /* The sink implementation is reconfigured for passthrough in
1762 * pa_sink_reconfigure(). This function sets the PA core objects to
1763 * passthrough mode. */
1765 /* disable the monitor in passthrough mode */
1766 if (s->monitor_source) {
1767 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1768 pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1771 /* set the volume to NORM */
1772 s->saved_volume = *pa_sink_get_volume(s, true);
1773 s->saved_save_volume = s->save_volume;
1775 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1776 pa_sink_set_volume(s, &volume, true, false);
1778 pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
1781 /* Called from main context */
/* Undoes pa_sink_enter_passthrough(): resumes the monitor source and
 * restores the volume (and its save flag) captured when passthrough mode
 * was entered, then clears the saved state. */
1782 void pa_sink_leave_passthrough(pa_sink *s) {
1783 /* Unsuspend monitor */
1784 if (s->monitor_source) {
1785 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1786 pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1789 /* Restore sink volume to what it was before we entered passthrough mode */
1790 pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
1792 pa_cvolume_init(&s->saved_volume);
1793 s->saved_save_volume = false;
1797 /* Called from main context. */
/* Recomputes a sink input's reference ratio, the input's volume relative
 * to the sink's reference volume (both remapped into the input's channel
 * map). Channels where the sink volume is muted are skipped, and a
 * channel's ratio is only rewritten when multiplying the old ratio back
 * would not reproduce the input volume exactly (avoids rounding drift). */
1798 static void compute_reference_ratio(pa_sink_input *i) {
1800 pa_cvolume remapped;
1804 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1807 * Calculates the reference ratio from the sink's reference
1808 * volume. This basically calculates:
1810 * i->reference_ratio = i->volume / i->sink->reference_volume
1813 remapped = i->sink->reference_volume;
1814 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1816 ratio = i->reference_ratio;
1818 for (c = 0; c < i->sample_spec.channels; c++) {
1820 /* We don't update when the sink volume is 0 anyway */
1821 if (remapped.values[c] <= PA_VOLUME_MUTED)
1824 /* Don't update the reference ratio unless necessary */
1825 if (pa_sw_volume_multiply(
1827 remapped.values[c]) == i->volume.values[c])
1830 ratio.values[c] = pa_sw_volume_divide(
1831 i->volume.values[c],
1832 remapped.values[c]);
1835 pa_sink_input_set_reference_ratio(i, &ratio);
1838 /* Called from main context. Only called for the root sink in volume sharing
1839 * cases, except for internal recursive calls. */
/* Recomputes the reference ratio of every input attached to 's', recursing
 * into linked volume-sharing filter sinks so the whole tree is updated. */
1840 static void compute_reference_ratios(pa_sink *s) {
1844 pa_sink_assert_ref(s);
1845 pa_assert_ctl_context();
1846 pa_assert(PA_SINK_IS_LINKED(s->state));
1847 pa_assert(pa_sink_flat_volume_enabled(s));
1849 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1850 compute_reference_ratio(i);
1852 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1853 && PA_SINK_IS_LINKED(i->origin_sink->state))
1854 compute_reference_ratios(i->origin_sink);
1858 /* Called from main context. Only called for the root sink in volume sharing
1859 * cases, except for internal recursive calls. */
/* Recomputes each input's real ratio (input volume relative to the sink's
 * real/hardware volume) and from it the input's soft volume. Inputs that
 * feed a volume-sharing filter sink are special-cased: their real ratio is
 * fixed at 0 dB and the recursion descends into the filter sink instead.
 * The resulting soft volumes are NOT pushed to the IO thread here; the
 * caller is responsible for that. */
1860 static void compute_real_ratios(pa_sink *s) {
1864 pa_sink_assert_ref(s);
1865 pa_assert_ctl_context();
1866 pa_assert(PA_SINK_IS_LINKED(s->state));
1867 pa_assert(pa_sink_flat_volume_enabled(s));
1869 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1871 pa_cvolume remapped;
1873 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1874 /* The origin sink uses volume sharing, so this input's real ratio
1875 * is handled as a special case - the real ratio must be 0 dB, and
1876 * as a result i->soft_volume must equal i->volume_factor. */
1877 pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1878 i->soft_volume = i->volume_factor;
1880 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1881 compute_real_ratios(i->origin_sink);
1887 * This basically calculates:
1889 * i->real_ratio := i->volume / s->real_volume
1890 * i->soft_volume := i->real_ratio * i->volume_factor
1893 remapped = s->real_volume;
1894 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1896 i->real_ratio.channels = i->sample_spec.channels;
1897 i->soft_volume.channels = i->sample_spec.channels;
1899 for (c = 0; c < i->sample_spec.channels; c++) {
1901 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1902 /* We leave i->real_ratio untouched */
1903 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1907 /* Don't lose accuracy unless necessary */
1908 if (pa_sw_volume_multiply(
1909 i->real_ratio.values[c],
1910 remapped.values[c]) != i->volume.values[c])
1912 i->real_ratio.values[c] = pa_sw_volume_divide(
1913 i->volume.values[c],
1914 remapped.values[c]);
1916 i->soft_volume.values[c] = pa_sw_volume_multiply(
1917 i->real_ratio.values[c],
1918 i->volume_factor.values[c]);
1921 /* We don't copy the soft_volume to the thread_info data
1922 * here. That must be done by the caller */
/* Remaps volume 'v' from channel map 'from' to 'to' while minimizing the
 * impact on other streams: if 'template' (already in the 'to' map) round-
 * trips back to 'v', it is adopted as-is; otherwise, for differing maps,
 * a flat all-channel volume (the max of 'v') is used so one stream's
 * change can always be compensated in another stream. Returns 'v'. */
1926 static pa_cvolume *cvolume_remap_minimal_impact(
1928 const pa_cvolume *template,
1929 const pa_channel_map *from,
1930 const pa_channel_map *to) {
1935 pa_assert(template);
1938 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1939 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1941 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1942 * mapping from sink input to sink volumes:
1944 * If template is a possible remapping from v it is used instead
1945 * of remapping anew.
1947 * If the channel maps don't match we set an all-channel volume on
1948 * the sink to ensure that changing a volume on one stream has no
1949 * effect that cannot be compensated for in another stream that
1950 * does not have the same channel map as the sink. */
1952 if (pa_channel_map_equal(from, to))
1956 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1961 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1965 /* Called from main thread. Only called for the root sink in volume sharing
1966 * cases, except for internal recursive calls. */
/* Accumulates into 'max_volume' (in 'channel_map' domain) the per-channel
 * maximum of all input volumes attached anywhere in this sink's volume-
 * sharing tree. Inputs that feed volume-sharing filter sinks contribute
 * their filter sink's inputs instead of their own (shared) volume. */
1967 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1971 pa_sink_assert_ref(s);
1972 pa_assert(max_volume);
1973 pa_assert(channel_map);
1974 pa_assert(pa_sink_flat_volume_enabled(s));
1976 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1977 pa_cvolume remapped;
1979 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1980 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1981 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1983 /* Ignore this input. The origin sink uses volume sharing, so this
1984 * input's volume will be set to be equal to the root sink's real
1985 * volume. Obviously this input's current volume must not then
1986 * affect what the root sink's real volume will be. */
1990 remapped = i->volume;
1991 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1992 pa_cvolume_merge(max_volume, max_volume, &remapped);
1996 /* Called from main thread. Only called for the root sink in volume sharing
1997 * cases, except for internal recursive calls. */
/* Returns true if the sink (or any volume-sharing filter sink hanging off
 * it) has at least one "real" input, i.e. one that is not merely the feed
 * of an empty volume-sharing filter sink. */
1998 static bool has_inputs(pa_sink *s) {
2002 pa_sink_assert_ref(s);
2004 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2005 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
2012 /* Called from main thread. Only called for the root sink in volume sharing
2013 * cases, except for internal recursive calls. */
/* Sets s->real_volume to 'new_volume' (given in 'channel_map' domain,
 * remapped into the sink's map) and propagates it down the volume-sharing
 * tree: inputs feeding volume-sharing filter sinks are forced to follow
 * the root's real volume (with their reference ratio recomputed when flat
 * volume is active), and the filter sinks themselves recurse. */
2014 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
2018 pa_sink_assert_ref(s);
2019 pa_assert(new_volume);
2020 pa_assert(channel_map);
2022 s->real_volume = *new_volume;
2023 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
2025 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2026 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2027 if (pa_sink_flat_volume_enabled(s)) {
2028 pa_cvolume new_input_volume;
2030 /* Follow the root sink's real volume. */
2031 new_input_volume = *new_volume;
2032 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
2033 pa_sink_input_set_volume_direct(i, &new_input_volume);
2034 compute_reference_ratio(i);
2037 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2038 update_real_volume(i->origin_sink, new_volume, channel_map);
2043 /* Called from main thread. Only called for the root sink in shared volume
/* Derives the sink's real (hardware) volume in flat-volume mode: it is the
 * per-channel maximum over all stream volumes in the tree. With no inputs
 * the reference volume is used unchanged. Afterwards the real ratios and
 * soft volumes of all inputs are rebuilt to match. */
2045 static void compute_real_volume(pa_sink *s) {
2046 pa_sink_assert_ref(s);
2047 pa_assert_ctl_context();
2048 pa_assert(PA_SINK_IS_LINKED(s->state));
2049 pa_assert(pa_sink_flat_volume_enabled(s));
2050 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2052 /* This determines the maximum volume of all streams and sets
2053 * s->real_volume accordingly. */
2055 if (!has_inputs(s)) {
2056 /* In the special case that we have no sink inputs we leave the
2057 * volume unmodified. */
2058 update_real_volume(s, &s->reference_volume, &s->channel_map);
2062 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
2064 /* First let's determine the new maximum volume of all inputs
2065 * connected to this sink */
2066 get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
2067 update_real_volume(s, &s->real_volume, &s->channel_map);
2069 /* Then, let's update the real ratios/soft volumes of all inputs
2070 * connected to this sink */
2071 compute_real_ratios(s);
2074 /* Called from main thread. Only called for the root sink in shared volume
2075 * cases, except for internal recursive calls. */
/* After a sink-side reference volume change, rebuilds each stream's volume
 * as reference_volume * reference_ratio. Inputs feeding volume-sharing
 * filter sinks are skipped here (update_real_volume() handles them) but
 * their filter sinks are recursed into. */
2076 static void propagate_reference_volume(pa_sink *s) {
2080 pa_sink_assert_ref(s);
2081 pa_assert_ctl_context();
2082 pa_assert(PA_SINK_IS_LINKED(s->state));
2083 pa_assert(pa_sink_flat_volume_enabled(s));
2085 /* This is called whenever the sink volume changes that is not
2086 * caused by a sink input volume change. We need to fix up the
2087 * sink input volumes accordingly */
2089 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2090 pa_cvolume new_volume;
2092 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2093 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2094 propagate_reference_volume(i->origin_sink);
2096 /* Since the origin sink uses volume sharing, this input's volume
2097 * needs to be updated to match the root sink's real volume, but
2098 * that will be done later in update_real_volume(). */
2102 /* This basically calculates:
2104 * i->volume := s->reference_volume * i->reference_ratio */
2106 new_volume = s->reference_volume;
2107 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2108 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2109 pa_sink_input_set_volume_direct(i, &new_volume);
2113 /* Called from main thread. Only called for the root sink in volume sharing
2114 * cases, except for internal recursive calls. The return value indicates
2115 * whether any reference volume actually changed. */
/* Sets the sink's reference volume to 'v' (remapped from 'channel_map')
 * and pushes the same value down to all linked volume-sharing filter
 * sinks. 'save' marks the volume for persistence; an unchanged volume
 * keeps its previous save flag unless 'save' is set. */
2116 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
2118 bool reference_volume_changed;
2122 pa_sink_assert_ref(s);
2123 pa_assert(PA_SINK_IS_LINKED(s->state));
2125 pa_assert(channel_map);
2126 pa_assert(pa_cvolume_valid(v));
2129 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
2131 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
2132 pa_sink_set_reference_volume_direct(s, &volume);
2134 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
2136 if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2137 /* If the root sink's volume doesn't change, then there can't be any
2138 * changes in the other sinks in the sink tree either.
2140 * It's probably theoretically possible that even if the root sink's
2141 * volume changes slightly, some filter sink doesn't change its volume
2142 * due to rounding errors. If that happens, we still want to propagate
2143 * the changed root sink volume to the sinks connected to the
2144 * intermediate sink that didn't change its volume. This theoretical
2145 * possibility is the reason why we have that !(s->flags &
2146 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2147 * notice even if we returned here false always if
2148 * reference_volume_changed is false. */
2151 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2152 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2153 && PA_SINK_IS_LINKED(i->origin_sink->state))
2154 update_reference_volume(i->origin_sink, v, channel_map, false);
2160 /* Called from main thread */
/* Sets the sink volume. With a non-NULL 'volume' the reference volume of
 * the volume-sharing root is updated and, in flat-volume mode, propagated
 * to all streams before the real (hardware) volume is recomputed. With a
 * NULL 'volume' (flat volume only) the sink volume is instead re-derived
 * from the current stream volumes. Mono volumes are accepted on any sink
 * and scaled across channels. Refused (except for a 0 dB reset) while a
 * passthrough input is connected. Finally the driver's set_volume()
 * callback or the soft volume is applied and the IO thread is notified. */
2161 void pa_sink_set_volume(
2163 const pa_cvolume *volume,
2167 pa_cvolume new_reference_volume;
2170 pa_sink_assert_ref(s);
2171 pa_assert_ctl_context();
2172 pa_assert(PA_SINK_IS_LINKED(s->state));
2173 pa_assert(!volume || pa_cvolume_valid(volume));
2174 pa_assert(volume || pa_sink_flat_volume_enabled(s));
2175 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2177 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2178 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2179 if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2180 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
2184 /* In case of volume sharing, the volume is set for the root sink first,
2185 * from which it's then propagated to the sharing sinks. */
2186 root_sink = pa_sink_get_master(s);
2188 if (PA_UNLIKELY(!root_sink))
2191 /* As a special exception we accept mono volumes on all sinks --
2192 * even on those with more complex channel maps */
2195 if (pa_cvolume_compatible(volume, &s->sample_spec))
2196 new_reference_volume = *volume;
2198 new_reference_volume = s->reference_volume;
2199 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2202 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2204 if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2205 if (pa_sink_flat_volume_enabled(root_sink)) {
2206 /* OK, propagate this volume change back to the inputs */
2207 propagate_reference_volume(root_sink);
2209 /* And now recalculate the real volume */
2210 compute_real_volume(root_sink);
2212 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2216 /* If volume is NULL we synchronize the sink's real and
2217 * reference volumes with the stream volumes. */
2219 pa_assert(pa_sink_flat_volume_enabled(root_sink));
2221 /* Ok, let's determine the new real volume */
2222 compute_real_volume(root_sink);
2224 /* Let's 'push' the reference volume if necessary */
2225 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2226 /* If the sink and its root don't have the same number of channels, we need to remap */
2227 if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2228 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2229 update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2231 /* Now that the reference volume is updated, we can update the streams'
2232 * reference ratios. */
2233 compute_reference_ratios(root_sink);
2236 if (root_sink->set_volume) {
2237 /* If we have a function set_volume(), then we do not apply a
2238 * soft volume by default. However, set_volume() is free to
2239 * apply one to root_sink->soft_volume */
2241 pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
2242 if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2243 root_sink->set_volume(root_sink);
2246 /* If we have no function set_volume(), then the soft volume
2247 * becomes the real volume */
2248 root_sink->soft_volume = root_sink->real_volume;
2250 /* This tells the sink that soft volume and/or real volume changed */
2252 pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2255 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2256 * Only to be called by sink implementor */
/* Sets the sink's software attenuation. A NULL 'volume' resets it to
 * 0 dB. For non-deferred-volume sinks the new value is forwarded to the
 * IO thread via PA_SINK_MESSAGE_SET_VOLUME; deferred-volume sinks update
 * thread_info directly since this already runs in the IO thread. */
2257 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2259 pa_sink_assert_ref(s);
2260 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2262 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2263 pa_sink_assert_io_context(s);
2265 pa_assert_ctl_context();
2268 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2270 s->soft_volume = *volume;
2272 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2273 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2275 s->thread_info.soft_volume = s->soft_volume;
/* Called from the main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
        /* Nothing changed in hardware — nothing to propagate. */
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))
            return;

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, true);
    }

    if (pa_sink_flat_volume_enabled(s)) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume new_volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            pa_sink_input_set_reference_ratio(i, &i->real_ratio);

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            new_volume = s->reference_volume;
            pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
            pa_sink_input_set_volume_direct(i, &new_volume);

            /* Recurse into filter sinks that share our volume. */
            if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
                    && PA_SINK_IS_LINKED(i->origin_sink->state))
                propagate_real_volume(i->origin_sink, old_real_volume);
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        s->save_volume = true;
}
/* Called from io thread */
void pa_sink_update_volume_and_mute(pa_sink *s) {
    pa_assert(s);
    pa_sink_assert_io_context(s);

    /* Volume and mute may only be refreshed from the main thread, so post
     * (fire-and-forget) a request to it; it is handled as
     * PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE in pa_sink_process_msg(). */
    pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
}
/* Called from main thread */
const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Re-read the hardware volume if the sink requires polling
     * (refresh_volume) or the caller insists, then fold any externally
     * changed value back into the reference volume. */
    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_real_volume;

        pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

        old_real_volume = s->real_volume;

        /* Without deferred volume we may query the driver directly;
         * with deferred volume the query must run in the IO thread. */
        if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
            s->get_volume(s);

        if (s->flags & PA_SINK_DEFERRED_VOLUME)
            pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        update_real_volume(s, &s->real_volume, &s->channel_map);
        propagate_real_volume(s, &old_real_volume);
    }

    return &s->reference_volume;
}
2370 /* Called from main thread. In volume sharing cases, only the root sink may
2372 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2373 pa_cvolume old_real_volume;
2375 pa_sink_assert_ref(s);
2376 pa_assert_ctl_context();
2377 pa_assert(PA_SINK_IS_LINKED(s->state));
2378 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2380 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2382 old_real_volume = s->real_volume;
2383 update_real_volume(s, new_real_volume, &s->channel_map);
2384 propagate_real_volume(s, &old_real_volume);
/* Called from main thread */
void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
    bool old_muted;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    old_muted = s->muted;

    /* No state change: only merge the save flag, never clear it. */
    if (mute == old_muted) {
        s->save_muted |= save;
        return;
    }

    s->muted = mute;
    s->save_muted = save;

    /* Without deferred volume we apply the mute in the driver right away.
     * set_mute_in_progress guards against pa_sink_mute_changed() feeding
     * the driver's echo of this change back into us. */
    if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
        s->set_mute_in_progress = true;
        s->set_mute(s);
        s->set_mute_in_progress = false;
    }

    /* Notifications only make sense once the sink is visible to clients. */
    if (!PA_SINK_IS_LINKED(s->state))
        return;

    pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
}
/* Called from main thread */
bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Re-query the driver if the sink requires polling (refresh_muted) or
     * the caller insists; any change is folded in via pa_sink_mute_changed(). */
    if ((s->refresh_muted || force_refresh) && s->get_mute) {
        bool mute;

        if (s->flags & PA_SINK_DEFERRED_VOLUME) {
            /* Deferred volume: the query must run in the IO thread. */
            if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
                pa_sink_mute_changed(s, mute);
        } else {
            if (s->get_mute(s, &mute) >= 0)
                pa_sink_mute_changed(s, mute);
        }
    }

    return s->muted;
}
2441 /* Called from main thread */
2442 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2443 pa_sink_assert_ref(s);
2444 pa_assert_ctl_context();
2445 pa_assert(PA_SINK_IS_LINKED(s->state));
2447 if (s->set_mute_in_progress)
2450 /* pa_sink_set_mute() does this same check, so this may appear redundant,
2451 * but we must have this here also, because the save parameter of
2452 * pa_sink_set_mute() would otherwise have unintended side effects (saving
2453 * the mute state when it shouldn't be saved). */
2454 if (new_muted == s->muted)
2457 pa_sink_set_mute(s, new_muted, true);
/* Called from main thread */
bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Apply the property update according to 'mode'. */
    pa_proplist_update(s->proplist, mode, p);

    /* Only announce the change once the sink is visible to clients. */
    if (PA_SINK_IS_LINKED(s->state)) {
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    return true;
}
/* Called from main thread */
/* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
void pa_sink_set_description(pa_sink *s, const char *description) {
    const char *old;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* NULL description with no existing property: nothing to do. */
    if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
        return;

    old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    /* Unchanged description: avoid spurious change notifications. */
    if (old && description && pa_streq(old, description))
        return;

    if (description)
        pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
    else
        pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    /* Keep the monitor source's description in sync with ours. */
    if (s->monitor_source) {
        char *n;

        n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
        pa_source_set_description(s->monitor_source, n);
        pa_xfree(n);
    }

    if (PA_SINK_IS_LINKED(s->state)) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
    }
}
2510 /* Called from main thread */
2511 unsigned pa_sink_linked_by(pa_sink *s) {
2514 pa_sink_assert_ref(s);
2515 pa_assert_ctl_context();
2516 pa_assert(PA_SINK_IS_LINKED(s->state));
2518 ret = pa_idxset_size(s->inputs);
2520 /* We add in the number of streams connected to us here. Please
2521 * note the asymmetry to pa_sink_used_by()! */
2523 if (s->monitor_source)
2524 ret += pa_source_linked_by(s->monitor_source);
2529 /* Called from main thread */
2530 unsigned pa_sink_used_by(pa_sink *s) {
2533 pa_sink_assert_ref(s);
2534 pa_assert_ctl_context();
2535 pa_assert(PA_SINK_IS_LINKED(s->state));
2537 ret = pa_idxset_size(s->inputs);
2538 pa_assert(ret >= s->n_corked);
2540 /* Streams connected to our monitor source do not matter for
2541 * pa_sink_used_by()!.*/
2543 return ret - s->n_corked;
/* Called from main thread */
unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
    unsigned ret;
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (!PA_SINK_IS_LINKED(s->state))
        return 0;

    ret = 0;

    /* Count the streams that keep us busy, i.e. that would prevent an
     * auto-suspend. 'ignore_input' lets the caller exclude a stream that
     * is about to go away (e.g. while moving it). */
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_sink_input_state_t st;

        if (i == ignore_input)
            continue;

        st = pa_sink_input_get_state(i);

        /* We do not assert here. It is perfectly valid for a sink input to
         * be in the INIT state (i.e. created, marked done but not yet put)
         * and we should not care if it's unlinked as it won't contribute
         * towards our busy status. */
        if (!PA_SINK_INPUT_IS_LINKED(st))
            continue;

        if (st == PA_SINK_INPUT_CORKED)
            continue;

        if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
            continue;

        ret ++;
    }

    /* Streams on our monitor source keep us busy, too. */
    if (s->monitor_source)
        ret += pa_source_check_suspend(s->monitor_source, ignore_output);

    return ret;
}
2591 const char *pa_sink_state_to_string(pa_sink_state_t state) {
2593 case PA_SINK_INIT: return "INIT";
2594 case PA_SINK_IDLE: return "IDLE";
2595 case PA_SINK_RUNNING: return "RUNNING";
2596 case PA_SINK_SUSPENDED: return "SUSPENDED";
2597 case PA_SINK_UNLINKED: return "UNLINKED";
2598 case PA_SINK_INVALID_STATE: return "INVALID_STATE";
2601 pa_assert_not_reached();
/* Called from the IO thread */
static void sync_input_volumes_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Copy each input's main-thread soft volume into its IO-thread copy
     * and request a rewind so the new volume takes effect on already
     * rendered audio. */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
            continue;

        i->thread_info.soft_volume = i->soft_volume;
        pa_sink_input_request_rewind(i, 0, true, false, false);
    }
}
/* Called from the IO thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
static void set_shared_volume_within_thread(pa_sink *s) {
    pa_sink_input *i = NULL;
    void *state = NULL;

    pa_sink_assert_ref(s);

    /* Apply this sink's volume synchronously in-thread ... */
    PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);

    /* ... then recurse into all filter sinks that share our volume. */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
            set_shared_volume_within_thread(i->origin_sink);
    }
}
/* Called from IO thread, except when it is not */
/* Central message dispatcher for a sink: handles input add/remove/move,
 * volume/mute synchronization, state changes and latency queries. Most
 * messages are sent synchronously from the main thread via
 * pa_asyncmsgq_send(), so the sender is blocked while we run (which is
 * what makes the accesses to main-thread data below safe). */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);

    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_sink_input_attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be fixed up and
             * then configured on the sink. If this causes the sink latency to
             * go down, the sink implementor is responsible for doing a rewind
             * in the update_requested_latency() callback to ensure that the
             * sink buffer doesn't contain more data than what the new latency
             * allows.
             *
             * XXX: Does it really make sense to push this responsibility to
             * the sink implementors? Wouldn't it be better to do it once in
             * the core than many times in the modules? */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming.
             *
             * XXX: Does it really make sense to push this functionality to
             * the sink implementors? Wouldn't it be better to do it once in
             * the core than many times in the modules? */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            pa_sink_input_detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unhook this input from its IO-thread sync chain. */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
            pa_sink_invalidate_requested_latency(s, true);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* The old sink probably has some audio from this
                 * stream in its buffer. We want to "take it back" as
                 * much as possible and play it to the new sink. We
                 * don't know at this point how much the old sink can
                 * rewind. We have to pick something, and that
                 * something is the full latency of the old sink here.
                 * So we rewind the stream buffer by the sink latency
                 * amount, which may be more than what we should
                 * rewind. This can result in a chunk of audio being
                 * played both to the old sink and the new sink.
                 *
                 * FIXME: Fix this code so that we don't have to make
                 * guesses about how much the sink will actually be
                 * able to rewind. If someone comes up with a solution
                 * for this, something to note is that the part of the
                 * latency that the old sink couldn't rewind should
                 * ideally be compensated after the stream has moved
                 * to the new sink by adding silence. The new sink
                 * most likely can't start playing the moved stream
                 * immediately, and that gap should be removed from
                 * the "compensation silence" (at least at the time of
                 * writing this, the move finish code will actually
                 * already take care of dropping the new sink's
                 * unrewindable latency, so taking into account the
                 * unrewindable latency of the old sink is the only
                 * problem).
                 *
                 * The render_memblockq contents are discarded,
                 * because when the sink changes, the format of the
                 * audio stored in the render_memblockq may change
                 * too, making the stored audio invalid. FIXME:
                 * However, the read and write indices are moved back
                 * the same amount, so if they are not the same now,
                 * they won't be the same after the rewind either. If
                 * the write index of the render_memblockq is ahead of
                 * the read index, then the render_memblockq will feed
                 * the new sink some silence first, which it shouldn't
                 * do. The write index should be flushed to be the
                 * same as the read index. */

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s, false);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = true;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            pa_sink_input_detach(i);

            /* Let's remove the sink input ...*/
            pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));

            pa_sink_invalidate_requested_latency(s, true);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_sink_input_attach(i);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* In the ideal case the new sink would start playing
                 * the stream immediately. That requires the sink to
                 * be able to rewind all of its latency, which usually
                 * isn't possible, so there will probably be some gap
                 * before the moved stream becomes audible. We then
                 * have two possibilities: 1) start playing the stream
                 * from where it is now, or 2) drop the unrewindable
                 * latency of the sink from the stream. With option 1
                 * we won't lose any audio but the stream will have a
                 * pause. With option 2 we may lose some audio but the
                 * stream time will be somewhat in sync with the wall
                 * clock. Lennart seems to have chosen option 2 (one
                 * of the reasons might have been that option 1 is
                 * actually much harder to implement), so we drop the
                 * latency of the new sink from the moved stream and
                 * hope that the sink will undo most of that in the
                 * rewind. */

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s, false);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* Updating the requested sink latency has to be done
             * after the sink rewind request, not before, because
             * otherwise the sink may limit the rewind amount
             * needlessly. */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
            pa_sink *root_sink = pa_sink_get_master(s);

            if (PA_LIKELY(root_sink))
                set_shared_volume_within_thread(root_sink);

            return 0;
        }

        case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:

            /* With deferred volume, apply the hardware volume from the IO
             * thread and queue the change for smooth ramping. */
            if (s->flags & PA_SINK_DEFERRED_VOLUME) {
                s->set_volume(s);
                pa_sink_volume_change_push(s);
            }
            /* Fall through ... */

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:

            if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
                s->get_volume(s);
                pa_sink_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
            }

            /* In case sink implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
                s->set_mute(s);

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:

            if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
                return s->get_mute(s, userdata);

            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* Detect transitions between suspended and opened so we can
             * tell our inputs about them below. */
            bool suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            /* A suspended sink has nothing to rewind. */
            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = false;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_PORT:

            pa_assert(userdata);
            if (s->set_port) {
                struct sink_message_set_port *msg_data = userdata;
                msg_data->ret = s->set_port(s, msg_data->port);
            }
            return 0;

        case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            /* Make sure we're not messing with main thread when no longer linked */
            if (!PA_SINK_IS_LINKED(s->state))
                return 0;

            pa_sink_get_volume(s, true);
            pa_sink_get_mute(s, true);
            return 0;

        case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
            s->thread_info.port_latency_offset = offset;
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            ;
    }

    return -1;
}
/* Called from main thread */
int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
    pa_sink *sink;
    uint32_t idx;
    int ret = 0;

    pa_core_assert_ref(c);
    pa_assert_ctl_context();
    pa_assert(cause != 0);

    /* Try all sinks; remember the last failure but keep going so one
     * failing sink doesn't block the others. */
    PA_IDXSET_FOREACH(sink, c->sinks, idx) {
        int r;

        if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
            ret = r;
    }

    return ret;
}
/* Called from IO thread */
void pa_sink_detach_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* Detach all our inputs, then our monitor source. */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        pa_sink_input_detach(i);

    if (s->monitor_source)
        pa_source_detach_within_thread(s->monitor_source);
}
/* Called from IO thread */
void pa_sink_attach_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* Re-attach all our inputs, then our monitor source (inverse of
     * pa_sink_detach_within_thread()). */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        pa_sink_input_attach(i);

    if (s->monitor_source)
        pa_source_attach_within_thread(s->monitor_source);
}
3117 /* Called from IO thread */
3118 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
3119 pa_sink_assert_ref(s);
3120 pa_sink_assert_io_context(s);
3121 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3123 if (nbytes == (size_t) -1)
3124 nbytes = s->thread_info.max_rewind;
3126 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
3128 if (s->thread_info.rewind_requested &&
3129 nbytes <= s->thread_info.rewind_nbytes)
3132 s->thread_info.rewind_nbytes = nbytes;
3133 s->thread_info.rewind_requested = true;
3135 if (s->request_rewind)
3136 s->request_rewind(s);
/* Called from IO thread */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Sinks without dynamic latency always report their (clamped) fixed
     * latency. */
    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    /* Use the cached value if it hasn't been invalidated. */
    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* The effective requested latency is the minimum over all inputs
     * that expressed a preference ... */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* ... and whatever our monitor source asks for. */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = true;
    }

    return result;
}
/* Called from main thread */
pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* A suspended sink has no meaningful requested latency. */
    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    /* Query the IO thread synchronously; it maps "no explicit request"
     * (-1) to max_latency for us (see PA_SINK_MESSAGE_GET_REQUESTED_LATENCY). */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);

    return usec;
}
/* Called from IO as well as the main thread -- the latter only before the IO thread started up */
void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    if (max_rewind == s->thread_info.max_rewind)
        return;

    s->thread_info.max_rewind = max_rewind;

    /* Propagate the new limit to all inputs and the monitor source. */
    if (PA_SINK_IS_LINKED(s->thread_info.state))
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);

    if (s->monitor_source)
        pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
}
3215 /* Called from main thread */
3216 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3217 pa_sink_assert_ref(s);
3218 pa_assert_ctl_context();
3220 if (PA_SINK_IS_LINKED(s->state))
3221 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3223 pa_sink_set_max_rewind_within_thread(s, max_rewind);
/* Called from IO as well as the main thread -- the latter only before the IO thread started up */
void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    if (max_request == s->thread_info.max_request)
        return;

    s->thread_info.max_request = max_request;

    /* Propagate the new limit to all inputs. */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            pa_sink_input_update_max_request(i, s->thread_info.max_request);
    }
}
3246 /* Called from main thread */
3247 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3248 pa_sink_assert_ref(s);
3249 pa_assert_ctl_context();
3251 if (PA_SINK_IS_LINKED(s->state))
3252 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3254 pa_sink_set_max_request_within_thread(s, max_request);
/* Called from IO thread */
void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Only sinks with dynamic latency cache a requested latency; for
     * fixed-latency sinks a 'dynamic' invalidation is a no-op. */
    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = false;
    else if (dynamic)
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        if (s->update_requested_latency)
            s->update_requested_latency(s);

        /* Let the inputs know the sink's requested latency may have changed. */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
/* Called from main thread */
void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* min_latency == 0: no limit
     * min_latency anything else: specified limit
     *
     * Similar for max_latency */

    if (min_latency < ABSOLUTE_MIN_LATENCY)
        min_latency = ABSOLUTE_MIN_LATENCY;

    if (max_latency <= 0 ||
        max_latency > ABSOLUTE_MAX_LATENCY)
        max_latency = ABSOLUTE_MAX_LATENCY;

    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    /* Linked: the range is owned by the IO thread, so send it over;
     * not yet linked: write thread_info directly. */
    if (PA_SINK_IS_LINKED(s->state)) {
        pa_usec_t r[2];

        r[0] = min_latency;
        r[1] = max_latency;

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
    } else
        pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
}
/* Called from main thread */
void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(min_latency);
    pa_assert(max_latency);

    /* Linked: the authoritative values live in the IO thread, query them
     * synchronously; otherwise read thread_info directly. */
    if (PA_SINK_IS_LINKED(s->state)) {
        pa_usec_t r[2] = { 0, 0 };

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);

        *min_latency = r[0];
        *max_latency = r[1];
    } else {
        *min_latency = s->thread_info.min_latency;
        *max_latency = s->thread_info.max_latency;
    }
}
/* Called from IO thread */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    if (s->thread_info.min_latency == min_latency &&
        s->thread_info.max_latency == max_latency)
        return;

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    /* Let the inputs know the sink's latency range changed. */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    /* The cached requested latency may now be outside the new range. */
    pa_sink_invalidate_requested_latency(s, false);

    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
3371 /* Called from main thread */
3372 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3373 pa_sink_assert_ref(s);
3374 pa_assert_ctl_context();
3376 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3377 pa_assert(latency == 0);
3381 if (latency < ABSOLUTE_MIN_LATENCY)
3382 latency = ABSOLUTE_MIN_LATENCY;
3384 if (latency > ABSOLUTE_MAX_LATENCY)
3385 latency = ABSOLUTE_MAX_LATENCY;
3387 if (PA_SINK_IS_LINKED(s->state))
3388 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3390 s->thread_info.fixed_latency = latency;
3392 pa_source_set_fixed_latency(s->monitor_source, latency);
3395 /* Called from main thread */
/* Returns the sink's fixed latency. NOTE(review): the declaration of
 * 'latency' and the return statements are elided in this copy; for a
 * dynamic-latency sink this presumably returns 0 -- confirm upstream. */
3396 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3399 pa_sink_assert_ref(s);
3400 pa_assert_ctl_context();
3402 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
/* Linked: query the IO thread; unlinked: read thread_info directly. */
3405 if (PA_SINK_IS_LINKED(s->state))
3406 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3408 latency = s->thread_info.fixed_latency;
3413 /* Called from IO thread */
/* Applies a fixed latency in the IO thread: stores it in thread_info,
 * notifies attached inputs, invalidates the requested latency, and mirrors
 * the value onto the monitor source. Dynamic-latency sinks are forced to 0. */
3414 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3415 pa_sink_assert_ref(s);
3416 pa_sink_assert_io_context(s);
3418 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3419 pa_assert(latency == 0);
3420 s->thread_info.fixed_latency = 0;
3422 if (s->monitor_source)
3423 pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
/* (early return for the dynamic case is elided in this copy) */
3428 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3429 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
/* Unchanged value: presumably returns early (elided line). */
3431 if (s->thread_info.fixed_latency == latency)
3434 s->thread_info.fixed_latency = latency;
/* NOTE(review): iterator 'i' (pa_sink_input *) declaration is elided here. */
3436 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3440 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3441 if (i->update_sink_fixed_latency)
3442 i->update_sink_fixed_latency(i);
3445 pa_sink_invalidate_requested_latency(s, false);
3447 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3450 /* Called from main context */
/* Updates the latency offset associated with the active port, forwards it to
 * the IO thread (or writes thread_info directly when unlinked), and fires the
 * PORT_LATENCY_OFFSET_CHANGED hook so subscribers can react. */
3451 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3452 pa_sink_assert_ref(s);
3454 s->port_latency_offset = offset;
3456 if (PA_SINK_IS_LINKED(s->state))
3457 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3459 s->thread_info.port_latency_offset = offset;
3461 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3464 /* Called from main context */
/* Returns the maximum number of bytes the sink can rewind. Unlinked sinks
 * are read directly; linked sinks are queried through the IO thread.
 * NOTE(review): the declaration of 'r' and the final 'return r;' are elided
 * in this copy. */
3465 size_t pa_sink_get_max_rewind(pa_sink *s) {
3467 pa_assert_ctl_context();
3468 pa_sink_assert_ref(s);
3470 if (!PA_SINK_IS_LINKED(s->state))
3471 return s->thread_info.max_rewind;
3473 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3478 /* Called from main context */
/* Returns the maximum number of bytes the sink may request at once.
 * Mirrors pa_sink_get_max_rewind: direct read when unlinked, IO-thread query
 * when linked. NOTE(review): 'r' declaration and 'return r;' elided here. */
3479 size_t pa_sink_get_max_request(pa_sink *s) {
3481 pa_sink_assert_ref(s);
3482 pa_assert_ctl_context();
3484 if (!PA_SINK_IS_LINKED(s->state))
3485 return s->thread_info.max_request;
3487 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3492 /* Called from main context */
/* Switches the sink's active port to the port named 'name'.
 * Returns 0 on success or a negative PA_ERR_* code:
 *   -PA_ERR_NOTIMPLEMENTED  sink has no set_port() implementation
 *   -PA_ERR_NOENTITY        no such port, or the driver refused the switch
 * On success: posts a change event, records the new active port and the
 * 'save' flag, applies the port's latency offset, re-evaluates the default
 * sink, and fires the PORT_CHANGED hook.
 * NOTE(review): the guard conditions preceding the two early returns (e.g.
 * the !s->set_port and missing-name checks) and the 'ret' declaration are
 * elided in this copy. */
3493 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3494 pa_device_port *port;
3497 pa_sink_assert_ref(s);
3498 pa_assert_ctl_context();
3501 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3502 return -PA_ERR_NOTIMPLEMENTED;
3506 return -PA_ERR_NOENTITY;
3508 if (!(port = pa_hashmap_get(s->ports, name)))
3509 return -PA_ERR_NOENTITY;
/* Already on this port: just sticky-OR the save flag (presumably followed
 * by an early success return -- elided here). */
3511 if (s->active_port == port) {
3512 s->save_port = s->save_port || save;
/* Deferred-volume sinks must switch ports from the IO thread; others call
 * the driver's set_port() directly on this thread. */
3516 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
3517 struct sink_message_set_port msg = { .port = port, .ret = 0 };
3518 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
3522 ret = s->set_port(s, port);
3525 return -PA_ERR_NOENTITY;
3527 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3529 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3531 s->active_port = port;
3532 s->save_port = save;
3534 pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3536 /* The active port affects the default sink selection. */
3537 pa_core_update_default_sink(s->core);
3539 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
/* Fills in PA_PROP_DEVICE_ICON_NAME on the proplist if not already set,
 * deriving an XDG icon name from the device's form factor, class, profile and
 * bus properties. 'is_sink' is used in elided branches (presumably to choose
 * between output and input/microphone icons -- confirm upstream). The final
 * icon is "<base><profile-suffix>[-<bus>]". */
3544 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3545 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
/* Respect an icon name that is already present. */
3549 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
/* Map form factor to a base icon name. Several assignments for the
 * intermediate cases (webcam/computer/handset) are elided in this copy. */
3552 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3554 if (pa_streq(ff, "microphone"))
3555 t = "audio-input-microphone";
3556 else if (pa_streq(ff, "webcam"))
3558 else if (pa_streq(ff, "computer"))
3560 else if (pa_streq(ff, "handset"))
3562 else if (pa_streq(ff, "portable"))
3563 t = "multimedia-player";
3564 else if (pa_streq(ff, "tv"))
3565 t = "video-display";
3568 * The following icons are not part of the icon naming spec,
3569 * because Rodney Dawes sucks as the maintainer of that spec.
3571 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3573 else if (pa_streq(ff, "headset"))
3574 t = "audio-headset";
3575 else if (pa_streq(ff, "headphone"))
3576 t = "audio-headphones";
3577 else if (pa_streq(ff, "speaker"))
3578 t = "audio-speakers";
3579 else if (pa_streq(ff, "hands-free"))
3580 t = "audio-handsfree";
/* Device class can override/refine the base icon (modem branch elided). */
3584 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3585 if (pa_streq(c, "modem"))
3592 t = "audio-input-microphone";
/* The profile name contributes a suffix 's' (analog/iec958/hdmi branches
 * partially elided here). */
3595 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3596 if (strstr(profile, "analog"))
3598 else if (strstr(profile, "iec958"))
3600 else if (strstr(profile, "hdmi"))
3604 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
/* Compose base + profile suffix + optional "-<bus>" and store it. */
3606 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
/* Fills in PA_PROP_DEVICE_DESCRIPTION if not already set, preferring (in this
 * order, per the visible branches): the owning card's description, a
 * form-factor-based label, a class-based label (modem branch elided), or the
 * raw product name. If a profile description 'k' is available the result is
 * "<description> <profile>". */
3611 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3612 const char *s, *d = NULL, *k;
/* Respect an existing description. */
3615 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
/* Inherit the card's description when present (assignment to 'd' is in an
 * elided line -- presumably d = s). */
3619 if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3623 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3624 if (pa_streq(s, "internal"))
3625 d = _("Built-in Audio");
3628 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3629 if (pa_streq(s, "modem"))
/* Last resort: the product name reported by the device. */
3633 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3638 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
/* With a profile description append it; otherwise store 'd' as-is. */
3641 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3643 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
/* Fills in PA_PROP_DEVICE_INTENDED_ROLES if not already set: telephony-style
 * form factors (handset/hands-free/headset) are tagged with the "phone"
 * role. NOTE(review): the declaration of 's' and the return statements are
 * elided in this copy. */
3648 bool pa_device_init_intended_roles(pa_proplist *p) {
3652 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3655 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3656 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3657 || pa_streq(s, "headset")) {
3658 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
/* Computes a heuristic priority for the device from its class, form factor,
 * bus and profile properties; used to rank devices (e.g. for default-device
 * selection). NOTE(review): the 'priority += ...' increments after each
 * matched branch are elided in this copy, so the relative weights cannot be
 * read here -- confirm against upstream. */
3665 unsigned pa_device_init_priority(pa_proplist *p) {
3667 unsigned priority = 0;
3671 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3673 if (pa_streq(s, "sound"))
3675 else if (!pa_streq(s, "modem"))
3679 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3681 if (pa_streq(s, "headphone"))
3683 else if (pa_streq(s, "hifi"))
3685 else if (pa_streq(s, "speaker"))
3687 else if (pa_streq(s, "portable"))
3691 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3693 if (pa_streq(s, "bluetooth"))
3695 else if (pa_streq(s, "usb"))
3697 else if (pa_streq(s, "pci"))
3701 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3703 if (pa_startswith(s, "analog-"))
3705 else if (pa_startswith(s, "iec958-"))
3712 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3714 /* Called from the IO thread. */
/* Allocates a pa_sink_volume_change, recycling from the lock-free static
 * flist when possible to avoid malloc on the IO thread. The llist links are
 * reset and hw_volume is initialized for the sink's channel count.
 * (The 'return c;' line is elided in this copy.) */
3715 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3716 pa_sink_volume_change *c;
3717 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3718 c = pa_xnew(pa_sink_volume_change, 1);
3720 PA_LLIST_INIT(pa_sink_volume_change, c);
3722 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3726 /* Called from the IO thread. */
/* Returns a volume-change record to the static flist for reuse; when the
 * flist is full it is released with pa_xfree instead (that fallback line is
 * elided in this copy). */
3727 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3729 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3733 /* Called from the IO thread. */
/* Schedules a deferred hardware volume change. Computes the target HW volume
 * (real_volume / soft_volume), timestamps it at "now + sink latency + extra
 * delay", then inserts it into the time-ordered volume_changes list. A
 * safety margin shifts increases later and decreases earlier so the audible
 * transition lines up with the audio actually playing. Any already-queued
 * changes that would fire after the new one are dropped as superseded. */
3734 void pa_sink_volume_change_push(pa_sink *s) {
3735 pa_sink_volume_change *c = NULL;
3736 pa_sink_volume_change *nc = NULL;
3737 pa_sink_volume_change *pc = NULL;
3738 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3740 const char *direction = NULL;
3743 nc = pa_sink_volume_change_new(s);
3745 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3746 * Adding one more volume for HW would get us rid of this, but I am trying
3747 * to survive with the ones we already have. */
3748 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
/* Nothing queued and the target equals the current HW volume: nothing to
 * do, release the record (an early return presumably follows -- elided). */
3750 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3751 pa_log_debug("Volume not changing");
3752 pa_sink_volume_change_free(nc);
/* Target time: when audio written now will actually be heard. */
3756 nc->at = pa_sink_get_latency_within_thread(s, false);
3757 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
/* Walk the queue from the tail to find the insertion point, applying the
 * safety margin relative to each queued change. */
3759 if (s->thread_info.volume_changes_tail) {
3760 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3761 /* If volume is going up let's do it a bit late. If it is going
3762 * down let's do it a bit early. */
3763 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3764 if (nc->at + safety_margin > c->at) {
3765 nc->at += safety_margin;
/* (else branch: volume going down -- intervening lines elided) */
3770 else if (nc->at - safety_margin > c->at) {
3771 nc->at -= safety_margin;
/* Empty queue: apply the margin relative to the current HW volume instead. */
3779 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3780 nc->at += safety_margin;
3783 nc->at -= safety_margin;
/* Insert at head or after the found node 'c', keeping the list time-ordered. */
3786 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3789 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3792 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3794 /* We can ignore volume events that came earlier but should happen later than this. */
3795 PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
3796 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3797 pa_sink_volume_change_free(c);
/* The new change is now the last one in the queue. */
3800 s->thread_info.volume_changes_tail = nc;
3803 /* Called from the IO thread. */
/* Discards all pending volume changes without applying them, freeing each
 * record. Used e.g. when the queue becomes irrelevant. (The loop header
 * around the 'next' walk is elided in this copy.) */
3804 static void pa_sink_volume_change_flush(pa_sink *s) {
3805 pa_sink_volume_change *c = s->thread_info.volume_changes;
3807 s->thread_info.volume_changes = NULL;
3808 s->thread_info.volume_changes_tail = NULL;
3810 pa_sink_volume_change *next = c->next;
3811 pa_sink_volume_change_free(c);
3816 /* Called from the IO thread. */
/* Applies every queued volume change whose deadline has passed by calling
 * s->write_volume(), updating current_hw_volume as it goes. If changes
 * remain queued, *usec_to_next (when non-NULL) receives the time until the
 * next one; otherwise the tail pointer is cleared. The return value
 * presumably reports whether anything was applied -- the 'return' lines and
 * the write_volume() invocation itself are elided in this copy. */
3817 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
/* Nothing to do when the queue is empty or the sink is not linked. */
3823 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
/* Deferred volume requires a driver-provided write_volume callback. */
3829 pa_assert(s->write_volume);
3831 now = pa_rtclock_now();
/* Pop and apply every change that is already due. */
3833 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3834 pa_sink_volume_change *c = s->thread_info.volume_changes;
3835 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3836 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3837 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3839 s->thread_info.current_hw_volume = c->hw_volume;
3840 pa_sink_volume_change_free(c);
/* Report how long until the next pending change, if the caller asked. */
3846 if (s->thread_info.volume_changes) {
3848 *usec_to_next = s->thread_info.volume_changes->at - now;
3849 if (pa_log_ratelimit(PA_LOG_DEBUG))
3850 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
/* Queue drained: keep the tail pointer consistent with the empty list. */
3855 s->thread_info.volume_changes_tail = NULL;
3860 /* Called from the IO thread. */
/* Reacts to a stream rewind of 'nbytes': queued volume changes scheduled
 * later than the (new, shorter) playback horizon are pulled earlier so they
 * still coincide with the audio they belong to, then any now-due changes are
 * applied immediately. The per-entry safety margin is applied in the same
 * up-late/down-early direction as in pa_sink_volume_change_push(). */
3861 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3862 /* All the queued volume events later than current latency are shifted to happen earlier. */
3863 pa_sink_volume_change *c;
3864 pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3865 pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3866 pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
3868 pa_log_debug("latency = %lld", (long long) limit);
3869 limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3871 PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3872 pa_usec_t modified_limit = limit;
3873 if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3874 modified_limit -= s->thread_info.volume_change_safety_margin;
3876 modified_limit += s->thread_info.volume_change_safety_margin;
/* Shift this entry earlier by the rewound duration, clamped to the limit
 * (the 'c->at -= rewound;' line between these checks is elided here). */
3877 if (c->at > modified_limit) {
3879 if (c->at < modified_limit)
3880 c->at = modified_limit;
3882 prev_vol = pa_cvolume_avg(&c->hw_volume);
/* Anything that is now due gets written out right away. */
3884 pa_sink_volume_change_apply(s, NULL);
3887 /* Called from the main thread */
3888 /* Gets the list of formats supported by the sink. The members and idxset must
3889 * be freed by the caller. */
/* If the driver provides get_formats() it is used; otherwise a single
 * PA_ENCODING_PCM pa_format_info is synthesized as the default.
 * NOTE(review): the declaration of 'ret' and the final return are elided in
 * this copy. */
3890 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3895 if (s->get_formats) {
3896 /* Sink supports format query, all is good */
3897 ret = s->get_formats(s);
3899 /* Sink doesn't support format query, so assume it does PCM */
3900 pa_format_info *f = pa_format_info_new();
3901 f->encoding = PA_ENCODING_PCM;
3903 ret = pa_idxset_new(NULL, NULL);
3904 pa_idxset_put(ret, f, NULL);
3910 /* Called from the main thread */
3911 /* Allows an external source to set what formats a sink supports if the sink
3912 * permits this. The function makes a copy of the formats on success. */
/* Returns the driver's set_formats() result when implemented; otherwise the
 * operation is unsupported (the bail-out return value itself is on an elided
 * line -- presumably false). */
3913 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3918 /* Sink supports setting formats -- let's give it a shot */
3919 return s->set_formats(s, formats);
3921 /* Sink doesn't support setting this -- bail out */
3925 /* Called from the main thread */
3926 /* Checks if the sink can accept this format */
/* Compares 'f' against every format the sink reports via
 * pa_sink_get_formats(); compatible means acceptable. The format list is
 * freed before returning. NOTE(review): the 'ret' flag handling and iterator
 * declaration are on elided lines in this copy. */
3927 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3928 pa_idxset *formats = NULL;
3934 formats = pa_sink_get_formats(s);
3937 pa_format_info *finfo_device;
3940 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3941 if (pa_format_info_is_compatible(finfo_device, f)) {
/* Done with the queried list -- release it and its members. */
3947 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3953 /* Called from the main thread */
3954 /* Calculates the intersection between formats supported by the sink and
3955 * in_formats, and returns these, in the order of the sink's formats. */
/* Each compatible in_format is deep-copied into out_formats; the sink's own
 * list is freed before returning out_formats (return statement elided in
 * this copy). An empty/NULL in_formats short-circuits to the 'done' path. */
3956 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3957 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3958 pa_format_info *f_sink, *f_in;
3963 if (!in_formats || pa_idxset_isempty(in_formats))
3966 sink_formats = pa_sink_get_formats(s);
/* Outer loop over sink formats preserves the sink's ordering in the result. */
3968 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3969 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3970 if (pa_format_info_is_compatible(f_sink, f_in))
3971 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3977 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3982 /* Called from the main thread. */
3983 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
3984 pa_cvolume old_volume;
3985 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3986 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3991 old_volume = s->reference_volume;
3993 if (pa_cvolume_equal(volume, &old_volume))
3996 s->reference_volume = *volume;
3997 pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
3998 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
3999 s->flags & PA_SINK_DECIBEL_VOLUME),
4000 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
4001 s->flags & PA_SINK_DECIBEL_VOLUME));
4003 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4004 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);