2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
32 #include <pulse/introspect.h>
33 #include <pulse/format.h>
34 #include <pulse/utf8.h>
35 #include <pulse/xmalloc.h>
36 #include <pulse/timeval.h>
37 #include <pulse/util.h>
38 #include <pulse/rtclock.h>
39 #include <pulse/internal.h>
41 #include <pulsecore/i18n.h>
42 #include <pulsecore/sink-input.h>
43 #include <pulsecore/namereg.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/mix.h>
47 #include <pulsecore/core-subscribe.h>
48 #include <pulsecore/log.h>
49 #include <pulsecore/macro.h>
50 #include <pulsecore/play-memblockq.h>
51 #include <pulsecore/flist.h>
/* Tunables and top-level declarations for the sink implementation.
 * NOTE(review): this extract is missing interior lines (struct members,
 * closing braces); comments describe only what is visible. */
55 #define MAX_MIX_CHANNELS 32
56 #define MIX_BUFFER_LENGTH (pa_page_size())
/* Latency clamps; MAX is clearly in usec (PA_USEC_PER_SEC). The unit of the
 * bare 500 for MIN is presumably usec as well -- TODO confirm upstream. */
57 #define ABSOLUTE_MIN_LATENCY (500)
58 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
59 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* pa_sink is a public message object (refcounted, usable with asyncmsgq). */
61 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
/* One queued deferred (hardware) volume change; kept in a linked list per sink. */
63 struct pa_sink_volume_change {
67 PA_LLIST_FIELDS(pa_sink_volume_change);
/* Payload for PA_SINK_MESSAGE_SET_PORT when deferred volume is in use. */
70 struct sink_message_set_port {
/* Forward declarations for the free hook and deferred-volume helpers below. */
75 static void sink_free(pa_object *s);
77 static void pa_sink_volume_change_push(pa_sink *s);
78 static void pa_sink_volume_change_flush(pa_sink *s);
79 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/* Tizen debugging aid: writes the rendered PCM of `chunk` to a per-sink dump
 * file. Lazily opens the file when PA_PCM_DUMP_PA_SINK is enabled and the
 * sink is RUNNING, and closes it again when the config bit is cleared.
 * NOTE(review): interior lines are missing from this extract (declarations of
 * now/tm/datetime/ptr, else branches, braces) -- comments cover visible code only. */
82 static void pa_sink_write_pcm_dump(pa_sink *s, pa_memchunk *chunk)
84 char *dump_time = NULL, *dump_path_surfix = NULL;
85 const char *s_device_api_str, *card_name_str, *device_idx_str;
90 /* open file for dump pcm */
91 if (s->core->pcm_dump & PA_PCM_DUMP_PA_SINK && !s->pcm_dump_fp && s->state == PA_SINK_RUNNING) {
/* Timestamp (HHMMSS.mmm) becomes part of the dump file name. */
92 pa_gettimeofday(&now);
93 localtime_r(&now.tv_sec, &tm);
94 memset(&datetime[0], 0x00, sizeof(datetime));
95 strftime(&datetime[0], sizeof(datetime), "%H%M%S", &tm);
96 dump_time = pa_sprintf_malloc("%s.%03ld", &datetime[0], now.tv_usec / 1000);
/* File-name suffix: "<card>.<device>" for ALSA sinks, otherwise the device
 * API string, otherwise the sink name. */
98 if ((s_device_api_str = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_API))) {
99 if (pa_streq(s_device_api_str, "alsa")) {
100 card_name_str = pa_proplist_gets(s->proplist, "alsa.card_name");
101 device_idx_str = pa_proplist_gets(s->proplist, "alsa.device");
102 dump_path_surfix = pa_sprintf_malloc("%s.%s", pa_strnull(card_name_str), pa_strnull(device_idx_str));
104 dump_path_surfix = pa_sprintf_malloc("%s", s_device_api_str);
107 dump_path_surfix = pa_sprintf_malloc("%s", s->name);
110 s->dump_path = pa_sprintf_malloc("%s_%s_pa-sink%d-%s_%dch_%d.raw", PA_PCM_DUMP_PATH_PREFIX, pa_strempty(dump_time),
111 s->index, pa_strempty(dump_path_surfix), s->sample_spec.channels, s->sample_spec.rate);
113 s->pcm_dump_fp = fopen(s->dump_path, "w");
115 pa_log_warn("%s open failed", s->dump_path);
117 pa_log_info("%s opened", s->dump_path);
/* NOTE(review): dump_time appears to be freed in a missing line; only
 * dump_path_surfix's pa_xfree is visible here -- confirm no leak upstream. */
120 pa_xfree(dump_path_surfix);
121 /* close file for dump pcm when config is changed */
122 } else if (~s->core->pcm_dump & PA_PCM_DUMP_PA_SINK && s->pcm_dump_fp) {
123 fclose(s->pcm_dump_fp);
124 pa_log_info("%s closed", s->dump_path);
125 pa_xfree(s->dump_path);
126 s->pcm_dump_fp = NULL;
/* With the file open, append the raw chunk payload. */
130 if (s->pcm_dump_fp) {
133 ptr = pa_memblock_acquire(chunk->memblock);
135 fwrite((uint8_t *)ptr + chunk->index, 1, chunk->length, s->pcm_dump_fp);
137 pa_log_warn("pa_memblock_acquire is failed. ptr is NULL");
139 pa_memblock_release(chunk->memblock);
/* Initializes a pa_sink_new_data on the stack: fresh proplist and an empty
 * ports hashmap (values unreffed via pa_device_port_unref on free). Returns
 * `data` for chaining. */
144 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
148 data->proplist = pa_proplist_new();
149 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
/* Replaces data->name with a private copy of `name`, freeing any previous value. */
154 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
157 pa_xfree(data->name);
158 data->name = pa_xstrdup(name);
/* Stores the sample spec by value; a NULL `spec` clears the is-set flag instead. */
161 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
164 if ((data->sample_spec_is_set = !!spec))
165 data->sample_spec = *spec;
/* Stores the channel map by value; a NULL `map` clears the is-set flag instead. */
168 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
171 if ((data->channel_map_is_set = !!map))
172 data->channel_map = *map;
/* Records the alternate sample rate and marks it as explicitly set. */
175 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
178 data->alternate_sample_rate_is_set = true;
179 data->alternate_sample_rate = alternate_sample_rate;
/* Stores the initial volume by value; a NULL `volume` clears the is-set flag instead. */
182 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
185 if ((data->volume_is_set = !!volume))
186 data->volume = *volume;
/* Marks the mute state as explicitly set. NOTE(review): the line assigning
 * data->muted itself is missing from this extract. */
189 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
192 data->muted_is_set = true;
/* Replaces the requested active port name with a private copy of `port`. */
196 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
199 pa_xfree(data->active_port);
200 data->active_port = pa_xstrdup(port);
/* Releases everything owned by a pa_sink_new_data: proplist, ports map
 * (entries unreffed via the map's free callback), name and port strings. */
203 void pa_sink_new_data_done(pa_sink_new_data *data) {
206 pa_proplist_free(data->proplist);
209 pa_hashmap_free(data->ports);
211 pa_xfree(data->name);
212 pa_xfree(data->active_port);
215 /* Called from main context */
/* Clears all implementor-provided callbacks so a freshly created sink starts
 * with no hardware volume/mute/latency/format hooks attached. */
216 static void reset_callbacks(pa_sink *s) {
220 s->get_volume = NULL;
221 s->set_volume = NULL;
222 s->write_volume = NULL;
225 s->request_rewind = NULL;
226 s->update_requested_latency = NULL;
228 s->get_formats = NULL;
229 s->set_formats = NULL;
230 s->reconfigure = NULL;
233 /* Called from main context */
/* Creates and registers a new sink from `data`: registers the name, fires the
 * NEW and FIXATE hooks, validates/fixates sample spec, channel map, volume
 * and port, initializes the object and its thread_info, and creates the
 * matching "<name>.monitor" source. Returns NULL on any validation failure.
 * NOTE(review): interior lines (declarations of s/name/pt/dn, returns,
 * braces, else branches) are missing from this extract. */
234 pa_sink* pa_sink_new(
236 pa_sink_new_data *data,
237 pa_sink_flags_t flags) {
241 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
242 pa_source_new_data source_data;
248 pa_assert(data->name);
249 pa_assert_ctl_context();
251 s = pa_msgobject_new(pa_sink);
/* Claim the sink name in the name registry before anything else. */
253 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
254 pa_log_debug("Failed to register name %s.", data->name);
259 pa_sink_new_data_set_name(data, name);
/* Modules may veto or adjust the new sink via the SINK_NEW hook. */
261 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
263 pa_namereg_unregister(core, name);
267 /* FIXME, need to free s here on failure */
/* Validate everything the caller supplied. */
269 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
270 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
272 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
274 if (!data->channel_map_is_set)
275 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
277 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
278 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
280 /* FIXME: There should probably be a general function for checking whether
281 * the sink volume is allowed to be set, like there is for sink inputs. */
282 pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
284 if (!data->volume_is_set) {
285 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
286 data->save_volume = false;
289 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
290 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
292 if (!data->muted_is_set)
/* Inherit card properties and fill in description/icon/role defaults. */
296 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
298 pa_device_init_description(data->proplist, data->card);
299 pa_device_init_icon(data->proplist, true);
300 pa_device_init_intended_roles(data->proplist);
302 if (!data->active_port) {
303 pa_device_port *p = pa_device_port_find_best(data->ports);
305 pa_sink_new_data_set_port(data, p->name);
308 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
310 pa_namereg_unregister(core, name);
/* Populate the sink object itself. */
314 s->parent.parent.free = sink_free;
315 s->parent.process_msg = pa_sink_process_msg;
318 s->state = PA_SINK_INIT;
321 s->suspend_cause = data->suspend_cause;
322 pa_sink_set_mixer_dirty(s, false);
323 s->name = pa_xstrdup(name);
324 s->proplist = pa_proplist_copy(data->proplist);
325 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
326 s->module = data->module;
327 s->card = data->card;
329 s->priority = pa_device_init_priority(s->proplist);
331 s->sample_spec = data->sample_spec;
332 s->channel_map = data->channel_map;
333 s->default_sample_rate = s->sample_spec.rate;
335 if (data->alternate_sample_rate_is_set)
336 s->alternate_sample_rate = data->alternate_sample_rate;
338 s->alternate_sample_rate = s->core->alternate_sample_rate;
/* An alternate rate identical to the default is useless; disable it. */
340 if (s->sample_spec.rate == s->alternate_sample_rate) {
341 pa_log_warn("Default and alternate sample rates are the same.");
342 s->alternate_sample_rate = 0;
345 s->inputs = pa_idxset_new(NULL, NULL);
347 s->input_to_master = NULL;
349 s->reference_volume = s->real_volume = data->volume;
350 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
351 s->base_volume = PA_VOLUME_NORM;
352 s->n_volume_steps = PA_VOLUME_NORM+1;
353 s->muted = data->muted;
354 s->refresh_volume = s->refresh_muted = false;
361 /* As a minor optimization we just steal the list instead of
363 s->ports = data->ports;
366 s->active_port = NULL;
367 s->save_port = false;
/* Honor an explicitly requested port if it exists in the port map. */
369 if (data->active_port)
370 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
371 s->save_port = data->save_port;
373 /* Hopefully the active port has already been assigned in the previous call
374 to pa_device_port_find_best, but better safe than sorry */
376 s->active_port = pa_device_port_find_best(s->ports);
379 s->port_latency_offset = s->active_port->latency_offset;
381 s->port_latency_offset = 0;
383 s->save_volume = data->save_volume;
384 s->save_muted = data->save_muted;
385 #ifdef TIZEN_PCM_DUMP
386 s->pcm_dump_fp = NULL;
390 pa_silence_memchunk_get(
391 &core->silence_cache,
/* IO-thread-side state; the IO thread picks these up after pa_sink_put(). */
397 s->thread_info.rtpoll = NULL;
398 s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
399 (pa_free_cb_t) pa_sink_input_unref);
400 s->thread_info.soft_volume = s->soft_volume;
401 s->thread_info.soft_muted = s->muted;
402 s->thread_info.state = s->state;
403 s->thread_info.rewind_nbytes = 0;
404 s->thread_info.rewind_requested = false;
405 s->thread_info.max_rewind = 0;
406 s->thread_info.max_request = 0;
407 s->thread_info.requested_latency_valid = false;
408 s->thread_info.requested_latency = 0;
409 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
410 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
411 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
413 PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
414 s->thread_info.volume_changes_tail = NULL;
415 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
416 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
417 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
418 s->thread_info.port_latency_offset = s->port_latency_offset;
420 /* FIXME: This should probably be moved to pa_sink_put() */
421 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
424 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
426 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
427 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
430 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
431 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Create the companion monitor source with matching spec and latency flags. */
435 pa_source_new_data_init(&source_data);
436 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
437 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
438 pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
439 source_data.name = pa_sprintf_malloc("%s.monitor", name);
440 source_data.driver = data->driver;
441 source_data.module = data->module;
442 source_data.card = data->card;
444 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
445 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
446 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
448 s->monitor_source = pa_source_new(core, &source_data,
449 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
450 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
452 pa_source_new_data_done(&source_data);
454 if (!s->monitor_source) {
460 s->monitor_source->monitor_of = s;
462 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
463 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
464 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
469 /* Called from main context */
/* Transitions the sink to `state`: calls the implementor's set_state hook,
 * forwards SET_STATE to the IO thread (rolling back on failure), fires
 * change events, and notifies inputs/monitor on suspend/resume.
 * NOTE(review): interior lines (early return, suspend_change declaration,
 * braces) are missing from this extract. */
470 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
473 pa_sink_state_t original_state;
476 pa_assert_ctl_context();
478 if (s->state == state)
481 original_state = s->state;
/* suspend_change: true iff we cross the SUSPENDED <-> OPENED boundary. */
484 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
485 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
488 if ((ret = s->set_state(s, state)) < 0)
492 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
/* IO thread refused the new state: roll the implementor back. */
495 s->set_state(s, original_state);
500 pa_log_debug("%s: state: %s -> %s", s->name, pa_sink_state_to_string(s->state), pa_sink_state_to_string(state));
502 #ifdef TIZEN_PCM_DUMP
503 /* close file for dump pcm */
504 if (s->pcm_dump_fp && (s->core->pcm_dump_option & PA_PCM_DUMP_OPTION_SEPARATED) &&
505 state == PA_SINK_IDLE && original_state == PA_SINK_RUNNING) {
506 fclose(s->pcm_dump_fp);
507 pa_log_info("%s closed", s->dump_path);
508 pa_xfree(s->dump_path);
509 s->pcm_dump_fp = NULL;
513 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
514 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
515 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
518 if (suspend_change) {
522 /* We're suspending or resuming, tell everyone about it */
524 PA_IDXSET_FOREACH(i, s->inputs, idx)
525 if (s->state == PA_SINK_SUSPENDED &&
526 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
527 pa_sink_input_kill(i);
529 i->suspend(i, state == PA_SINK_SUSPENDED);
531 if (s->monitor_source)
532 pa_source_sync_suspend(s->monitor_source);
/* Installs the implementor's get_volume callback.
 * NOTE(review): the body (presumably `s->get_volume = cb;`) is missing from
 * this extract -- confirm against upstream. */
538 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Installs (or clears) the implementor's set_volume callback and toggles
 * PA_SINK_HW_VOLUME_CTRL accordingly; posts a change event if the flags
 * changed after init. NOTE(review): interior lines (assignment of cb,
 * flags snapshot, else branches) are missing from this extract. */
544 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
545 pa_sink_flags_t flags;
/* write_volume requires a set_volume callback to exist. */
548 pa_assert(!s->write_volume || cb);
552 /* Save the current flags so we can tell if they've changed */
556 /* The sink implementor is responsible for setting decibel volume support */
557 s->flags |= PA_SINK_HW_VOLUME_CTRL;
559 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
560 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
561 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
564 /* If the flags have changed after init, let any clients know via a change event */
565 if (s->state != PA_SINK_INIT && flags != s->flags)
566 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Installs (or clears) the deferred-volume write callback and toggles
 * PA_SINK_DEFERRED_VOLUME; posts a change event if the flags changed after
 * init. NOTE(review): the flags snapshot and if/else lines are missing. */
569 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
570 pa_sink_flags_t flags;
/* A write_volume callback only makes sense alongside set_volume. */
573 pa_assert(!cb || s->set_volume);
575 s->write_volume = cb;
577 /* Save the current flags so we can tell if they've changed */
581 s->flags |= PA_SINK_DEFERRED_VOLUME;
583 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
585 /* If the flags have changed after init, let any clients know via a change event */
586 if (s->state != PA_SINK_INIT && flags != s->flags)
587 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Installs the implementor's get_mute callback.
 * NOTE(review): the body (presumably `s->get_mute = cb;`) is missing from
 * this extract -- confirm against upstream. */
590 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
/* Installs (or clears) the implementor's set_mute callback and toggles
 * PA_SINK_HW_MUTE_CTRL; posts a change event if the flags changed after init.
 * NOTE(review): the assignment of cb and the if/else lines are missing. */
596 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
597 pa_sink_flags_t flags;
603 /* Save the current flags so we can tell if they've changed */
607 s->flags |= PA_SINK_HW_MUTE_CTRL;
609 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
611 /* If the flags have changed after init, let any clients know via a change event */
612 if (s->state != PA_SINK_INIT && flags != s->flags)
613 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Toggles PA_SINK_FLAT_VOLUME, but only if flat volumes are globally enabled
 * in core->flat_volumes; posts a change event if the flags changed after
 * init. NOTE(review): the flags snapshot and if/else lines are missing. */
616 static void enable_flat_volume(pa_sink *s, bool enable) {
617 pa_sink_flags_t flags;
621 /* Always follow the overall user preference here */
622 enable = enable && s->core->flat_volumes;
624 /* Save the current flags so we can tell if they've changed */
628 s->flags |= PA_SINK_FLAT_VOLUME;
630 s->flags &= ~PA_SINK_FLAT_VOLUME;
632 /* If the flags have changed after init, let any clients know via a change event */
633 if (s->state != PA_SINK_INIT && flags != s->flags)
634 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Toggles PA_SINK_DECIBEL_VOLUME and enables/disables flat volume in lockstep
 * (flat volume requires dB support); posts a change event if the flags
 * changed after init. NOTE(review): the flags snapshot and if/else lines are
 * missing from this extract. */
637 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
638 pa_sink_flags_t flags;
642 /* Save the current flags so we can tell if they've changed */
646 s->flags |= PA_SINK_DECIBEL_VOLUME;
647 enable_flat_volume(s, true);
649 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
650 enable_flat_volume(s, false);
653 /* If the flags have changed after init, let any clients know via a change event */
654 if (s->state != PA_SINK_INIT && flags != s->flags)
655 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
658 /* Called from main context */
/* Finalizes sink creation after the implementor has filled in asyncmsgq and
 * latency fields: sanity-checks the volume-related flags, inherits volume
 * from the master for volume-sharing (filter) sinks, sets the initial state
 * (SUSPENDED or IDLE), puts the monitor source, fires PUT events and updates
 * the default sink. NOTE(review): some interior lines (else branch of the
 * volume-sharing case end, braces) are missing from this extract. */
659 void pa_sink_put(pa_sink* s) {
660 pa_sink_assert_ref(s);
661 pa_assert_ctl_context();
663 pa_assert(s->state == PA_SINK_INIT);
664 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
666 /* The following fields must be initialized properly when calling _put() */
667 pa_assert(s->asyncmsgq);
668 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
670 /* Generally, flags should be initialized via pa_sink_new(). As a
671 * special exception we allow some volume related flags to be set
672 * between _new() and _put() by the callback setter functions above.
674 * Thus we implement a couple safeguards here which ensure the above
675 * setters were used (or at least the implementor made manual changes
676 * in a compatible way).
678 * Note: All of these flags set here can change over the life time
680 pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
681 pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
682 pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
684 /* XXX: Currently decibel volume is disabled for all sinks that use volume
685 * sharing. When the master sink supports decibel volume, it would be good
686 * to have the flag also in the filter sink, but currently we don't do that
687 * so that the flags of the filter sink never change when it's moved from
688 * a master sink to another. One solution for this problem would be to
689 * remove user-visible volume altogether from filter sinks when volume
690 * sharing is used, but the current approach was easier to implement... */
691 /* We always support decibel volumes in software, otherwise we leave it to
692 * the sink implementor to set this flag as needed.
694 * Note: This flag can also change over the life time of the sink. */
695 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
696 pa_sink_enable_decibel_volume(s, true);
697 s->soft_volume = s->reference_volume;
700 /* If the sink implementor support DB volumes by itself, we should always
701 * try and enable flat volumes too */
702 if ((s->flags & PA_SINK_DECIBEL_VOLUME))
703 enable_flat_volume(s, true);
705 if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
706 pa_sink *root_sink = pa_sink_get_master(s);
708 pa_assert(root_sink);
/* Filter sinks mirror the master's volumes, remapped to this channel map. */
710 s->reference_volume = root_sink->reference_volume;
711 pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
713 s->real_volume = root_sink->real_volume;
714 pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
716 /* We assume that if the sink implementor changed the default
717 * volume he did so in real_volume, because that is the usual
718 * place where he is supposed to place his changes. */
719 s->reference_volume = s->real_volume;
721 s->thread_info.soft_volume = s->soft_volume;
722 s->thread_info.soft_muted = s->muted;
723 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
/* Consistency checks between sink flags and monitor-source/latency setup. */
725 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
726 || (s->base_volume == PA_VOLUME_NORM
727 && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
728 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
729 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
730 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
731 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
733 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
734 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
735 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
737 if (s->suspend_cause)
738 pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED) == 0);
740 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
742 pa_source_put(s->monitor_source);
744 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
745 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
747 /* This function must be called after the PA_CORE_HOOK_SINK_PUT hook,
748 * because module-switch-on-connect needs to know the old default sink */
749 pa_core_update_default_sink(s->core);
752 /* Called from main context */
/* Detaches the sink from the core: fires UNLINK hooks, unregisters the name,
 * removes it from core/card idxsets, kills remaining inputs, moves to
 * UNLINKED state and unlinks the monitor source. Idempotent via
 * unlink_requested. NOTE(review): interior lines (the `linked` declaration,
 * early return, braces) are missing from this extract. */
753 void pa_sink_unlink(pa_sink* s) {
755 pa_sink_input *i, PA_UNUSED *j = NULL;
757 pa_sink_assert_ref(s);
758 pa_assert_ctl_context();
760 /* Please note that pa_sink_unlink() does more than simply
761 * reversing pa_sink_put(). It also undoes the registrations
762 * already done in pa_sink_new()! */
764 if (s->unlink_requested)
767 s->unlink_requested = true;
769 linked = PA_SINK_IS_LINKED(s->state);
772 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
774 if (s->state != PA_SINK_UNLINKED)
775 pa_namereg_unregister(s->core, s->name);
776 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
778 pa_core_update_default_sink(s->core);
781 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill every remaining input; killing removes it from s->inputs. */
783 while ((i = pa_idxset_first(s->inputs, NULL))) {
785 pa_sink_input_kill(i);
790 sink_set_state(s, PA_SINK_UNLINKED);
792 s->state = PA_SINK_UNLINKED;
796 if (s->monitor_source)
797 pa_source_unlink(s->monitor_source);
800 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
801 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
805 /* Called from main context */
/* Destructor invoked when the last reference is dropped (installed as
 * parent.parent.free in pa_sink_new). Releases the monitor source, input
 * containers, silence memblock, proplist, ports and the Tizen PCM dump file.
 * NOTE(review): interior lines (freeing of name/driver, braces) are missing. */
806 static void sink_free(pa_object *o) {
807 pa_sink *s = PA_SINK(o);
810 pa_assert_ctl_context();
811 pa_assert(pa_sink_refcnt(s) == 0);
812 pa_assert(!PA_SINK_IS_LINKED(s->state));
814 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
/* Drop any still-queued deferred volume changes. */
816 pa_sink_volume_change_flush(s);
818 if (s->monitor_source) {
819 pa_source_unref(s->monitor_source);
820 s->monitor_source = NULL;
823 pa_idxset_free(s->inputs, NULL);
824 pa_hashmap_free(s->thread_info.inputs);
826 if (s->silence.memblock)
827 pa_memblock_unref(s->silence.memblock);
833 pa_proplist_free(s->proplist);
836 pa_hashmap_free(s->ports);
838 #ifdef TIZEN_PCM_DUMP
839 /* close file for dump pcm */
840 if (s->pcm_dump_fp) {
841 fclose(s->pcm_dump_fp);
842 pa_log_info("%s closed", s->dump_path);
843 pa_xfree(s->dump_path);
844 s->pcm_dump_fp = NULL;
850 /* Called from main context, and not while the IO thread is active, please */
/* Sets the sink's message queue and propagates it to the monitor source.
 * NOTE(review): the `s->asyncmsgq = q;` line itself is missing from this
 * extract -- confirm against upstream. */
851 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
852 pa_sink_assert_ref(s);
853 pa_assert_ctl_context();
857 if (s->monitor_source)
858 pa_source_set_asyncmsgq(s->monitor_source, q);
861 /* Called from main context, and not while the IO thread is active, please */
/* Updates the bits of s->flags selected by `mask` to `value` (only LATENCY
 * and DYNAMIC_LATENCY may change), fires change events, mirrors the change
 * onto the monitor source, and recurses into filter sinks via their
 * origin_sink. NOTE(review): the early return and idx declaration are
 * missing from this extract. */
862 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
863 pa_sink_flags_t old_flags;
864 pa_sink_input *input;
867 pa_sink_assert_ref(s);
868 pa_assert_ctl_context();
870 /* For now, allow only a minimal set of flags to be changed. */
871 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
873 old_flags = s->flags;
874 s->flags = (s->flags & ~mask) | (value & mask);
876 if (s->flags == old_flags)
879 if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
880 pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
882 if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
883 pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
884 s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
886 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
887 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
/* Map sink latency flags onto the equivalent source flags for the monitor. */
889 if (s->monitor_source)
890 pa_source_update_flags(s->monitor_source,
891 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
892 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
893 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
894 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
896 PA_IDXSET_FOREACH(input, s->inputs, idx) {
897 if (input->origin_sink)
898 pa_sink_update_flags(input->origin_sink, mask, value);
902 /* Called from IO context, or before _put() from main context */
/* Records the rtpoll object used by the IO thread and propagates it to the
 * monitor source. */
903 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
904 pa_sink_assert_ref(s);
905 pa_sink_assert_io_context(s);
907 s->thread_info.rtpoll = p;
909 if (s->monitor_source)
910 pa_source_set_rtpoll(s->monitor_source, p);
913 /* Called from main context */
/* Re-evaluates whether the sink should be RUNNING (has users) or IDLE;
 * leaves a SUSPENDED sink alone. Returns sink_set_state()'s result.
 * NOTE(review): the early-return line for the SUSPENDED case is missing. */
914 int pa_sink_update_status(pa_sink*s) {
915 pa_sink_assert_ref(s);
916 pa_assert_ctl_context();
917 pa_assert(PA_SINK_IS_LINKED(s->state));
919 if (s->state == PA_SINK_SUSPENDED)
922 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
925 /* Called from any context - must be threadsafe */
/* Atomically marks the mixer settings as dirty/clean; safe from any thread. */
926 void pa_sink_set_mixer_dirty(pa_sink *s, bool is_dirty) {
927 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
930 /* Called from main context */
/* Adds or removes `cause` from the suspend-cause bitmask (mirrored on the
 * monitor source), re-applies the active port if the mixer became accessible
 * again, and transitions the sink to SUSPENDED or RUNNING/IDLE accordingly.
 * NOTE(review): the if/else around suspend, early return, and some braces
 * are missing from this extract. */
931 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
932 pa_sink_assert_ref(s);
933 pa_assert_ctl_context();
934 pa_assert(PA_SINK_IS_LINKED(s->state));
935 pa_assert(cause != 0);
938 s->suspend_cause |= cause;
939 s->monitor_source->suspend_cause |= cause;
941 s->suspend_cause &= ~cause;
942 s->monitor_source->suspend_cause &= ~cause;
945 if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
946 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
947 it'll be handled just fine. */
948 pa_sink_set_mixer_dirty(s, false);
949 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
950 if (s->active_port && s->set_port) {
951 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
/* With deferred volume the port switch must go through the IO thread. */
952 struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
953 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
956 s->set_port(s, s->active_port);
/* No state change needed if the suspend bit already matches the state. */
966 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
969 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
971 if (s->suspend_cause)
972 return sink_set_state(s, PA_SINK_SUSPENDED);
974 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
977 /* Called from main context */
/* Starts moving all inputs away from this sink: refs each input, begins the
 * move, and queues the ones that accepted; refs are dropped for inputs that
 * refused. Returns the queue (created here if `q` is NULL -- creation line
 * missing from this extract). NOTE(review): the pa_queue_push line and
 * return are also missing. */
978 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
979 pa_sink_input *i, *n;
982 pa_sink_assert_ref(s);
983 pa_assert_ctl_context();
984 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the successor first: starting a move mutates s->inputs. */
989 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
990 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
992 pa_sink_input_ref(i);
994 if (pa_sink_input_start_move(i) >= 0)
997 pa_sink_input_unref(i);
1003 /* Called from main context */
/* Completes a move started by pa_sink_move_all_start: each queued input is
 * attached to sink `s` (or failed if the attach fails / it is no longer
 * linked), unreffed, and the queue is freed. */
1004 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
1007 pa_sink_assert_ref(s);
1008 pa_assert_ctl_context();
1009 pa_assert(PA_SINK_IS_LINKED(s->state));
1012 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1013 if (PA_SINK_INPUT_IS_LINKED(i->state)) {
1014 if (pa_sink_input_finish_move(i, s, save) < 0)
1015 pa_sink_input_fail_move(i);
1018 pa_sink_input_unref(i);
1021 pa_queue_free(q, NULL);
1024 /* Called from main context */
/* Aborts a pending move: fails and unrefs every queued input, then frees the
 * queue. */
1025 void pa_sink_move_all_fail(pa_queue *q) {
1028 pa_assert_ctl_context();
1031 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1032 pa_sink_input_fail_move(i);
1033 pa_sink_input_unref(i);
1036 pa_queue_free(q, NULL);
1039 /* Called from IO thread context */
/* Scans all attached inputs for underruns, recursing into filter-sink trees
 * (converting byte counts between sample specs at each hop), remembers the
 * longest underrun `result`, and returns left_to_play - result (i.e. how
 * much playable data remains after accounting for the worst underrun).
 * NOTE(review): declarations of result/state and some braces are missing
 * from this extract. */
1040 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
1045 pa_sink_assert_ref(s);
1046 pa_sink_assert_io_context(s);
1048 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1049 size_t uf = i->thread_info.underrun_for_sink;
1051 /* Propagate down the filter tree */
1052 if (i->origin_sink) {
1053 size_t filter_result, left_to_play_origin;
1055 /* The recursive call works in the origin sink domain ... */
1056 left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
1058 /* .. and returns the time to sleep before waking up. We need the
1059 * underrun duration for comparisons, so we undo the subtraction on
1060 * the return value... */
1061 filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
1063 /* ... and convert it back to the master sink domain */
1064 filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
1066 /* Remember the longest underrun so far */
1067 if (filter_result > result)
1068 result = filter_result;
1072 /* No underrun here, move on */
1074 } else if (uf >= left_to_play) {
1075 /* The sink has possibly consumed all the data the sink input provided */
1076 pa_sink_input_process_underrun(i);
1077 } else if (uf > result) {
1078 /* Remember the longest underrun so far */
1084 pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
1085 (long) result, (long) left_to_play - result);
1086 return left_to_play - result;
1089 /* Called from IO thread context */
/* IO-thread rewind processing: clears the pending rewind request, rewinds the
 * deferred volume changes and (Tizen) the PCM dump file position, then
 * forwards the rewind to every input and to the monitor source.
 * NOTE(review): the short-cut return, the nbytes>0 guard around the body,
 * and the pcm_dump_fp check before fseeko are missing from this extract. */
1090 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1094 pa_sink_assert_ref(s);
1095 pa_sink_assert_io_context(s);
1096 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1098 /* If nobody requested this and this is actually no real rewind
1099 * then we can short cut this. Please note that this means that
1100 * not all rewind requests triggered upstream will always be
1101 * translated in actual requests! */
1102 if (!s->thread_info.rewind_requested && nbytes <= 0)
1105 s->thread_info.rewind_nbytes = 0;
1106 s->thread_info.rewind_requested = false;
1109 pa_log_debug("Processing rewind...");
1110 if (s->flags & PA_SINK_DEFERRED_VOLUME)
1111 pa_sink_volume_change_rewind(s, nbytes);
1112 #ifdef TIZEN_PCM_DUMP
/* Step the dump file back so re-rendered audio overwrites the rewound part. */
1115 fseeko(s->pcm_dump_fp, (off_t)nbytes * (-1), SEEK_CUR);
1119 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1120 pa_sink_input_assert_ref(i);
1121 pa_sink_input_process_rewind(i, nbytes);
1125 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1126 pa_source_process_rewind(s->monitor_source, nbytes);
1130 /* Called from IO thread context */
/* Peek up to *length bytes from each connected sink input into info[]
 * (at most maxinfo entries), skipping chunks that are pure silence.
 * On return *length is clamped to the shortest chunk gathered so that
 * all inputs can be mixed in lockstep; the entry count is returned.
 * NOTE(review): some lines of this function are elided in this excerpt. */
1131 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1135 size_t mixlength = *length;
1137 pa_sink_assert_ref(s);
1138 pa_sink_assert_io_context(s);
1141 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1142 pa_sink_input_assert_ref(i);
1144 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
/* Track the minimum chunk length over all inputs. */
1146 if (mixlength == 0 || info->chunk.length < mixlength)
1147 mixlength = info->chunk.length;
/* Silence contributes nothing to the mix; release it immediately. */
1149 if (pa_memblock_is_silence(info->chunk.memblock)) {
1150 pa_memblock_unref(info->chunk.memblock);
/* Keep a reference on the input; it is released later in inputs_drop(). */
1154 info->userdata = pa_sink_input_ref(i);
1156 pa_assert(info->chunk.memblock);
1157 pa_assert(info->chunk.length > 0);
1165 *length = mixlength;
1170 /* Called from IO thread context */
/* After mixing: advance every sink input by result->length bytes, feed the
 * per-input data to any direct source outputs on the monitor source, post
 * the mixed result to the monitor source, and release all references taken
 * by fill_mix_info().
 * NOTE(review): some lines of this function are elided in this excerpt;
 * comments cover only the visible statements. */
1171 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1175 unsigned n_unreffed = 0;
1177 pa_sink_assert_ref(s);
1178 pa_sink_assert_io_context(s);
1180 pa_assert(result->memblock);
1181 pa_assert(result->length > 0);
1183 /* We optimize for the case where the order of the inputs has not changed */
1185 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1187 pa_mix_info* m = NULL;
1189 pa_sink_input_assert_ref(i);
1191 /* Let's try to find the matching entry in the pa_mix_info array */
1192 for (j = 0; j < n; j ++) {
1194 if (info[p].userdata == i) {
1204 /* Drop read data */
1205 pa_sink_input_drop(i, result->length);
1207 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
/* Direct outputs get this input's own (pre-mix) data rather than the mix. */
1209 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1210 void *ostate = NULL;
1211 pa_source_output *o;
1214 if (m && m->chunk.memblock) {
1216 pa_memblock_ref(c.memblock);
1217 pa_assert(result->length <= c.length);
1218 c.length = result->length;
/* Apply the input's volume before handing the data to direct outputs. */
1220 pa_memchunk_make_writable(&c, 0);
1221 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
1224 pa_memblock_ref(c.memblock);
1225 pa_assert(result->length <= c.length);
1226 c.length = result->length;
1229 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1230 pa_source_output_assert_ref(o);
1231 pa_assert(o->direct_on_input == i);
1232 pa_source_post_direct(s->monitor_source, o, &c);
1235 pa_memblock_unref(c.memblock);
/* Release the references fill_mix_info() took for this entry. */
1240 if (m->chunk.memblock) {
1241 pa_memblock_unref(m->chunk.memblock);
1242 pa_memchunk_reset(&m->chunk);
1245 pa_sink_input_unref(m->userdata);
1252 /* Now drop references to entries that are included in the
1253 * pa_mix_info array but don't exist anymore */
1255 if (n_unreffed < n) {
1256 for (; n > 0; info++, n--) {
1258 pa_sink_input_unref(info->userdata);
1259 if (info->chunk.memblock)
1260 pa_memblock_unref(info->chunk.memblock);
/* Finally post the fully mixed result to the monitor source. */
1264 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1265 pa_source_post(s->monitor_source, result);
1268 /* Called from IO thread context */
/* Render up to `length` bytes of mixed audio into *result, allocating the
 * output chunk. Fast paths: suspended sink -> cached silence; no inputs ->
 * silence; exactly one input -> reference its chunk directly and apply
 * volume/mute in place; otherwise mix all inputs with pa_mix().
 * NOTE(review): some lines of this function are elided in this excerpt. */
1269 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1270 pa_mix_info info[MAX_MIX_CHANNELS];
1272 size_t block_size_max;
1274 pa_sink_assert_ref(s);
1275 pa_sink_assert_io_context(s);
1276 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1277 pa_assert(pa_frame_aligned(length, &s->sample_spec));
/* Rendering while a rewind is pending would consume data in the wrong order. */
1280 pa_assert(!s->thread_info.rewind_requested);
1281 pa_assert(s->thread_info.rewind_nbytes == 0);
1283 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1284 result->memblock = pa_memblock_ref(s->silence.memblock);
1285 result->index = s->silence.index;
1286 result->length = PA_MIN(s->silence.length, length);
/* Pick a default mix buffer size, clamped to the mempool's maximum block. */
1293 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1295 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1296 if (length > block_size_max)
1297 length = pa_frame_align(block_size_max, &s->sample_spec);
1299 pa_assert(length > 0);
1301 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: nothing to mix, hand out (a slice of) the silence chunk. */
1305 *result = s->silence;
1306 pa_memblock_ref(result->memblock);
1308 if (result->length > length)
1309 result->length = length;
1311 } else if (n == 1) {
/* Single input: avoid mixing entirely, just reference its chunk. */
1314 *result = info[0].chunk;
1315 pa_memblock_ref(result->memblock);
1317 if (result->length > length)
1318 result->length = length;
1320 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1322 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1323 pa_memblock_unref(result->memblock);
1324 pa_silence_memchunk_get(&s->core->silence_cache,
1329 } else if (!pa_cvolume_is_norm(&volume)) {
/* Non-unity volume requires touching the samples, so make them writable. */
1330 pa_memchunk_make_writable(result, 0);
1331 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* General case: mix all n inputs into a freshly allocated block. */
1335 result->memblock = pa_memblock_new(s->core->mempool, length);
1337 ptr = pa_memblock_acquire(result->memblock);
1338 result->length = pa_mix(info, n,
1341 &s->thread_info.soft_volume,
1342 s->thread_info.soft_muted);
1343 pa_memblock_release(result->memblock);
1348 inputs_drop(s, info, n, result);
1350 #ifdef TIZEN_PCM_DUMP
1351 pa_sink_write_pcm_dump(s, result);
1356 /* Called from IO thread context */
/* Like pa_sink_render(), but render into a caller-provided chunk. May
 * produce fewer bytes than target->length (target->length is updated);
 * callers wanting a full buffer use pa_sink_render_into_full().
 * NOTE(review): some lines of this function are elided in this excerpt. */
1357 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1358 pa_mix_info info[MAX_MIX_CHANNELS];
1360 size_t length, block_size_max;
1362 pa_sink_assert_ref(s);
1363 pa_sink_assert_io_context(s);
1364 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1366 pa_assert(target->memblock);
1367 pa_assert(target->length > 0);
1368 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1370 pa_assert(!s->thread_info.rewind_requested);
1371 pa_assert(s->thread_info.rewind_nbytes == 0);
1373 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1374 pa_silence_memchunk(target, &s->sample_spec);
/* Clamp the render length to the mempool's maximum block size. */
1380 length = target->length;
1381 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1382 if (length > block_size_max)
1383 length = pa_frame_align(block_size_max, &s->sample_spec);
1385 pa_assert(length > 0);
1387 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: no inputs, fill the target with silence. */
1390 if (target->length > length)
1391 target->length = length;
1393 pa_silence_memchunk(target, &s->sample_spec);
1394 } else if (n == 1) {
/* Single input: copy its (volume-adjusted) chunk straight into target. */
1397 if (target->length > length)
1398 target->length = length;
1400 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1402 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1403 pa_silence_memchunk(target, &s->sample_spec);
1407 vchunk = info[0].chunk;
1408 pa_memblock_ref(vchunk.memblock);
1410 if (vchunk.length > length)
1411 vchunk.length = length;
1413 if (!pa_cvolume_is_norm(&volume)) {
1414 pa_memchunk_make_writable(&vchunk, 0);
1415 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1418 pa_memchunk_memcpy(target, &vchunk);
1419 pa_memblock_unref(vchunk.memblock);
/* General case: mix all inputs directly into the target's memory. */
1425 ptr = pa_memblock_acquire(target->memblock);
1427 target->length = pa_mix(info, n,
1428 (uint8_t*) ptr + target->index, length,
1430 &s->thread_info.soft_volume,
1431 s->thread_info.soft_muted);
1433 pa_memblock_release(target->memblock);
1436 inputs_drop(s, info, n, target);
1438 #ifdef TIZEN_PCM_DUMP
1439 pa_sink_write_pcm_dump(s, target);
1444 /* Called from IO thread context */
/* Fill the caller-provided chunk completely, repeatedly invoking
 * pa_sink_render_into() until target->length bytes are produced
 * (a suspended sink is satisfied with silence immediately).
 * NOTE(review): the loop structure around the pa_sink_render_into() call
 * is elided in this excerpt; only the visible statements are commented. */
1445 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1449 pa_sink_assert_ref(s);
1450 pa_sink_assert_io_context(s);
1451 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1453 pa_assert(target->memblock);
1454 pa_assert(target->length > 0);
1455 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1457 pa_assert(!s->thread_info.rewind_requested);
1458 pa_assert(s->thread_info.rewind_nbytes == 0);
1460 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1461 pa_silence_memchunk(target, &s->sample_spec);
1474 pa_sink_render_into(s, &chunk);
1483 /* Called from IO thread context */
/* Render exactly `length` bytes into a newly allocated chunk: first a
 * normal pa_sink_render() pass, then, if it came up short, top up the
 * remainder in place via pa_sink_render_into_full(). */
1484 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1485 pa_sink_assert_ref(s);
1486 pa_sink_assert_io_context(s);
1487 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1488 pa_assert(length > 0);
1489 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1492 pa_assert(!s->thread_info.rewind_requested);
1493 pa_assert(s->thread_info.rewind_nbytes == 0);
1497 pa_sink_render(s, length, result);
1499 if (result->length < length) {
/* Grow the chunk to the full size, then render into the unfilled tail. */
1502 pa_memchunk_make_writable(result, length);
1504 chunk.memblock = result->memblock;
1505 chunk.index = result->index + result->length;
1506 chunk.length = length - result->length;
1508 pa_sink_render_into_full(s, &chunk);
1510 result->length = length;
1516 /* Called from main thread */
/* Try to switch the sink to a new sample spec (currently only the rate is
 * negotiated): bail out early if the sink cannot reconfigure or is busy,
 * pick the best rate among the requested/default/alternate rates, suspend
 * the sink, call the implementation's reconfigure() hook, update the
 * monitor source and corked inputs, then resume.
 * NOTE(review): some lines (returns/braces) are elided in this excerpt. */
1517 int pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
1519 pa_sample_spec desired_spec;
1520 uint32_t default_rate = s->default_sample_rate;
1521 uint32_t alternate_rate = s->alternate_sample_rate;
1524 bool default_rate_is_usable = false;
1525 bool alternate_rate_is_usable = false;
1526 bool avoid_resampling = s->core->avoid_resampling;
1528 /* We currently only try to reconfigure the sample rate */
1530 if (pa_sample_spec_equal(spec, &s->sample_spec))
1533 if (!s->reconfigure)
1536 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1537 pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
/* A running sink (or monitor) cannot change rate without audible glitches. */
1541 if (PA_SINK_IS_RUNNING(s->state)) {
1542 pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
1543 s->sample_spec.rate);
1547 if (s->monitor_source) {
1548 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1549 pa_log_info("Cannot update rate, monitor source is RUNNING");
1554 if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
1557 desired_spec = s->sample_spec;
/* Rate selection policy, in priority order (see branches below). */
1560 /* We have to try to use the sink input rate */
1561 desired_spec.rate = spec->rate;
1563 } else if (avoid_resampling && (spec->rate >= default_rate || spec->rate >= alternate_rate)) {
1564 /* We just try to set the sink input's sample rate if it's not too low */
1565 desired_spec.rate = spec->rate;
1567 } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
1568 /* We can directly try to use this rate */
1569 desired_spec.rate = spec->rate;
1572 /* See if we can pick a rate that results in less resampling effort */
1573 if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
1574 default_rate_is_usable = true;
1575 if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
1576 default_rate_is_usable = true;
1577 if (alternate_rate && alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
1578 alternate_rate_is_usable = true;
1579 if (alternate_rate && alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
1580 alternate_rate_is_usable = true;
1582 if (alternate_rate_is_usable && !default_rate_is_usable)
1583 desired_spec.rate = alternate_rate;
1585 desired_spec.rate = default_rate;
1588 if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_sink_is_passthrough(s))
1591 if (!passthrough && pa_sink_used_by(s) > 0)
/* Suspend around the actual reconfiguration to avoid rendering mid-switch. */
1594 pa_log_debug("Suspending sink %s due to changing format.", s->name);
1595 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1597 if (s->reconfigure(s, &desired_spec, passthrough) >= 0) {
1598 /* update monitor source as well */
1599 if (s->monitor_source && !passthrough)
1600 pa_source_reconfigure(s->monitor_source, &desired_spec, false);
1601 pa_log_info("Changed format successfully");
1603 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1604 if (i->state == PA_SINK_INPUT_CORKED)
1605 pa_sink_input_update_rate(i);
1611 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1616 /* Called from main thread */
/* Query the sink's current playback latency (in usec, sound-card time
 * domain) by messaging the IO thread, then apply the port latency offset.
 * Suspended sinks and sinks without PA_SINK_LATENCY short-circuit
 * (elided lines presumably return 0 — TODO confirm against full source). */
1617 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1620 pa_sink_assert_ref(s);
1621 pa_assert_ctl_context();
1622 pa_assert(PA_SINK_IS_LINKED(s->state));
1624 /* The returned value is supposed to be in the time domain of the sound card! */
1626 if (s->state == PA_SINK_SUSPENDED)
1629 if (!(s->flags & PA_SINK_LATENCY))
1632 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1634 /* the return value is unsigned, so check that the offset can be added to usec without
1636 if (-s->port_latency_offset <= usec)
1637 usec += s->port_latency_offset;
1641 return (pa_usec_t)usec;
1644 /* Called from IO thread */
/* IO-thread-side latency query: calls process_msg() directly instead of
 * sending an async message, adds the thread-side port latency offset, and
 * clamps to non-negative unless the caller allows negative latencies. */
1645 int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
1649 pa_sink_assert_ref(s);
1650 pa_sink_assert_io_context(s);
1651 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1653 /* The returned value is supposed to be in the time domain of the sound card! */
1655 if (s->thread_info.state == PA_SINK_SUSPENDED)
1658 if (!(s->flags & PA_SINK_LATENCY))
1661 o = PA_MSGOBJECT(s);
1663 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1665 o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1667 /* If allow_negative is false, the call should only return positive values, */
1668 usec += s->thread_info.port_latency_offset;
1669 if (!allow_negative && usec < 0)
1675 /* Called from the main thread (and also from the IO thread while the main
1676 * thread is waiting).
1678 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1679 * set. Instead, flat volume mode is detected by checking whether the root sink
1680 * has the flag set. */
/* Returns true when flat volume is in effect for s (checked on its root). */
1681 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1682 pa_sink_assert_ref(s);
/* Resolve volume-sharing chains to the root sink, which carries the flag. */
1684 s = pa_sink_get_master(s);
1687 return (s->flags & PA_SINK_FLAT_VOLUME);
1692 /* Called from the main thread (and also from the IO thread while the main
1693 * thread is waiting). */
/* Walk up a chain of volume-sharing filter sinks to the root (master) sink.
 * A filter sink with no input_to_master breaks the walk (elided branch). */
1694 pa_sink *pa_sink_get_master(pa_sink *s) {
1695 pa_sink_assert_ref(s);
1697 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1698 if (PA_UNLIKELY(!s->input_to_master))
1701 s = s->input_to_master->sink;
1707 /* Called from main context */
/* A sink is a filter sink iff it is attached to a master via a sink input. */
1708 bool pa_sink_is_filter(pa_sink *s) {
1709 pa_sink_assert_ref(s);
1711 return (s->input_to_master != NULL);
1714 /* Called from main context */
/* True when the sink's single connected input is a passthrough stream. */
1715 bool pa_sink_is_passthrough(pa_sink *s) {
1716 pa_sink_input *alt_i;
1719 pa_sink_assert_ref(s);
1721 /* one and only one PASSTHROUGH input can possibly be connected */
1722 if (pa_idxset_size(s->inputs) == 1) {
1723 alt_i = pa_idxset_first(s->inputs, &idx);
1725 if (pa_sink_input_is_passthrough(alt_i))
1732 /* Called from main context */
/* Switch the PA core objects into passthrough mode: suspend the monitor
 * source and force the volume to (at most) 0 dB, remembering the previous
 * volume so pa_sink_leave_passthrough() can restore it. */
1733 void pa_sink_enter_passthrough(pa_sink *s) {
1736 /* The sink implementation is reconfigured for passthrough in
1737 * pa_sink_reconfigure(). This function sets the PA core objects to
1738 * passthrough mode. */
1740 /* disable the monitor in passthrough mode */
1741 if (s->monitor_source) {
1742 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1743 pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1746 /* set the volume to NORM */
1747 s->saved_volume = *pa_sink_get_volume(s, true);
1748 s->saved_save_volume = s->save_volume;
/* Cap at base_volume so hardware amplification is not engaged. */
1750 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1751 pa_sink_set_volume(s, &volume, true, false);
1753 pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
1756 /* Called from main context */
/* Undo pa_sink_enter_passthrough(): resume the monitor source and restore
 * the volume (and its save flag) captured when passthrough was entered. */
1757 void pa_sink_leave_passthrough(pa_sink *s) {
1758 /* Unsuspend monitor */
1759 if (s->monitor_source) {
1760 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1761 pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1764 /* Restore sink volume to what it was before we entered passthrough mode */
1765 pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
/* Clear the saved state so stale values cannot be restored twice. */
1767 pa_cvolume_init(&s->saved_volume);
1768 s->saved_save_volume = false;
1772 /* Called from main context. */
/* Recompute a sink input's reference ratio (its volume relative to the
 * sink's reference volume), skipping channels where the update would be
 * a no-op or where the sink channel is muted. */
1773 static void compute_reference_ratio(pa_sink_input *i) {
1775 pa_cvolume remapped;
1779 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1782 * Calculates the reference ratio from the sink's reference
1783 * volume. This basically calculates:
1785 * i->reference_ratio = i->volume / i->sink->reference_volume
/* Remap the sink's reference volume into the input's channel map first. */
1788 remapped = i->sink->reference_volume;
1789 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1791 ratio = i->reference_ratio;
1793 for (c = 0; c < i->sample_spec.channels; c++) {
1795 /* We don't update when the sink volume is 0 anyway */
1796 if (remapped.values[c] <= PA_VOLUME_MUTED)
1799 /* Don't update the reference ratio unless necessary */
1800 if (pa_sw_volume_multiply(
1802 remapped.values[c]) == i->volume.values[c])
1805 ratio.values[c] = pa_sw_volume_divide(
1806 i->volume.values[c],
1807 remapped.values[c]);
1810 pa_sink_input_set_reference_ratio(i, &ratio);
1813 /* Called from main context. Only called for the root sink in volume sharing
1814 * cases, except for internal recursive calls. */
/* Recompute reference ratios for every input of s, recursing into linked
 * filter sinks that share volume with their master. */
1815 static void compute_reference_ratios(pa_sink *s) {
1819 pa_sink_assert_ref(s);
1820 pa_assert_ctl_context();
1821 pa_assert(PA_SINK_IS_LINKED(s->state));
1822 pa_assert(pa_sink_flat_volume_enabled(s));
1824 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1825 compute_reference_ratio(i);
1827 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1828 && PA_SINK_IS_LINKED(i->origin_sink->state))
1829 compute_reference_ratios(i->origin_sink);
1833 /* Called from main context. Only called for the root sink in volume sharing
1834 * cases, except for internal recursive calls. */
/* Recompute each input's real_ratio (volume relative to the sink's real
 * volume) and derive its soft_volume; recurses into volume-sharing
 * filter sinks, whose inputs get a 0 dB real ratio. */
1835 static void compute_real_ratios(pa_sink *s) {
1839 pa_sink_assert_ref(s);
1840 pa_assert_ctl_context();
1841 pa_assert(PA_SINK_IS_LINKED(s->state));
1842 pa_assert(pa_sink_flat_volume_enabled(s));
1844 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1846 pa_cvolume remapped;
1848 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1849 /* The origin sink uses volume sharing, so this input's real ratio
1850 * is handled as a special case - the real ratio must be 0 dB, and
1851 * as a result i->soft_volume must equal i->volume_factor. */
1852 pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1853 i->soft_volume = i->volume_factor;
1855 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1856 compute_real_ratios(i->origin_sink);
1862 * This basically calculates:
1864 * i->real_ratio := i->volume / s->real_volume
1865 * i->soft_volume := i->real_ratio * i->volume_factor
/* Remap the sink's real volume into the input's channel map. */
1868 remapped = s->real_volume;
1869 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1871 i->real_ratio.channels = i->sample_spec.channels;
1872 i->soft_volume.channels = i->sample_spec.channels;
1874 for (c = 0; c < i->sample_spec.channels; c++) {
1876 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1877 /* We leave i->real_ratio untouched */
1878 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1882 /* Don't lose accuracy unless necessary */
1883 if (pa_sw_volume_multiply(
1884 i->real_ratio.values[c],
1885 remapped.values[c]) != i->volume.values[c])
1887 i->real_ratio.values[c] = pa_sw_volume_divide(
1888 i->volume.values[c],
1889 remapped.values[c]);
1891 i->soft_volume.values[c] = pa_sw_volume_multiply(
1892 i->real_ratio.values[c],
1893 i->volume_factor.values[c]);
1896 /* We don't copy the soft_volume to the thread_info data
1897 * here. That must be done by the caller */
/* Remap volume v from channel map `from` to `to` with minimal impact:
 * reuse `template` when it is already a valid remapping of v, otherwise
 * flatten v to its maximum on all channels (see explanation below). */
1901 static pa_cvolume *cvolume_remap_minimal_impact(
1903 const pa_cvolume *template,
1904 const pa_channel_map *from,
1905 const pa_channel_map *to) {
1910 pa_assert(template);
1913 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1914 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1916 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1917 * mapping from sink input to sink volumes:
1919 * If template is a possible remapping from v it is used instead
1920 * of remapping anew.
1922 * If the channel maps don't match we set an all-channel volume on
1923 * the sink to ensure that changing a volume on one stream has no
1924 * effect that cannot be compensated for in another stream that
1925 * does not have the same channel map as the sink. */
1927 if (pa_channel_map_equal(from, to))
/* Does remapping the template back to `from` reproduce v? Then keep it. */
1931 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1936 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1940 /* Called from main thread. Only called for the root sink in volume sharing
1941 * cases, except for internal recursive calls. */
/* Merge (per-channel maximum) the volumes of all inputs in the sink tree
 * into *max_volume, expressed in channel_map; inputs that feed
 * volume-sharing filter sinks contribute their sink's inputs instead. */
1942 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1946 pa_sink_assert_ref(s);
1947 pa_assert(max_volume);
1948 pa_assert(channel_map);
1949 pa_assert(pa_sink_flat_volume_enabled(s));
1951 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1952 pa_cvolume remapped;
1954 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1955 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1956 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1958 /* Ignore this input. The origin sink uses volume sharing, so this
1959 * input's volume will be set to be equal to the root sink's real
1960 * volume. Obviously this input's current volume must not then
1961 * affect what the root sink's real volume will be. */
1965 remapped = i->volume;
1966 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1967 pa_cvolume_merge(max_volume, max_volume, &remapped);
1971 /* Called from main thread. Only called for the root sink in volume sharing
1972 * cases, except for internal recursive calls. */
/* True when the sink tree rooted at s has at least one "real" input,
 * looking through volume-sharing filter sinks recursively. */
1973 static bool has_inputs(pa_sink *s) {
1977 pa_sink_assert_ref(s);
1979 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1980 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1987 /* Called from main thread. Only called for the root sink in volume sharing
1988 * cases, except for internal recursive calls. */
/* Set s->real_volume to new_volume (remapped to the sink's channel map)
 * and push the same real volume down to all volume-sharing filter sinks,
 * updating their inputs' volumes and reference ratios along the way. */
1989 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1993 pa_sink_assert_ref(s);
1994 pa_assert(new_volume);
1995 pa_assert(channel_map);
1997 s->real_volume = *new_volume;
1998 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
2000 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2001 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2002 if (pa_sink_flat_volume_enabled(s)) {
2003 pa_cvolume new_input_volume;
2005 /* Follow the root sink's real volume. */
2006 new_input_volume = *new_volume;
2007 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
2008 pa_sink_input_set_volume_direct(i, &new_input_volume);
2009 compute_reference_ratio(i);
2012 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2013 update_real_volume(i->origin_sink, new_volume, channel_map);
2018 /* Called from main thread. Only called for the root sink in shared volume
/* Determine the root sink's real volume as the per-channel maximum of all
 * stream volumes, then recompute the inputs' real ratios/soft volumes. */
2020 static void compute_real_volume(pa_sink *s) {
2021 pa_sink_assert_ref(s);
2022 pa_assert_ctl_context();
2023 pa_assert(PA_SINK_IS_LINKED(s->state));
2024 pa_assert(pa_sink_flat_volume_enabled(s));
2025 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2027 /* This determines the maximum volume of all streams and sets
2028 * s->real_volume accordingly. */
2030 if (!has_inputs(s)) {
2031 /* In the special case that we have no sink inputs we leave the
2032 * volume unmodified. */
2033 update_real_volume(s, &s->reference_volume, &s->channel_map);
/* Start from mute, then merge in the maximum of all input volumes. */
2037 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
2039 /* First let's determine the new maximum volume of all inputs
2040 * connected to this sink */
2041 get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
2042 update_real_volume(s, &s->real_volume, &s->channel_map);
2044 /* Then, let's update the real ratios/soft volumes of all inputs
2045 * connected to this sink */
2046 compute_real_ratios(s);
2049 /* Called from main thread. Only called for the root sink in shared volume
2050 * cases, except for internal recursive calls. */
/* After a sink (not stream) volume change: recompute each input's volume
 * as reference_volume * reference_ratio, recursing through volume-sharing
 * filter sinks. */
2051 static void propagate_reference_volume(pa_sink *s) {
2055 pa_sink_assert_ref(s);
2056 pa_assert_ctl_context();
2057 pa_assert(PA_SINK_IS_LINKED(s->state));
2058 pa_assert(pa_sink_flat_volume_enabled(s));
2060 /* This is called whenever the sink volume changes that is not
2061 * caused by a sink input volume change. We need to fix up the
2062 * sink input volumes accordingly */
2064 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2065 pa_cvolume new_volume;
2067 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2068 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2069 propagate_reference_volume(i->origin_sink);
2071 /* Since the origin sink uses volume sharing, this input's volume
2072 * needs to be updated to match the root sink's real volume, but
2073 * that will be done later in update_real_volume(). */
2077 /* This basically calculates:
2079 * i->volume := s->reference_volume * i->reference_ratio */
2081 new_volume = s->reference_volume;
2082 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2083 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2084 pa_sink_input_set_volume_direct(i, &new_volume);
2088 /* Called from main thread. Only called for the root sink in volume sharing
2089 * cases, except for internal recursive calls. The return value indicates
2090 * whether any reference volume actually changed. */
/* Set the sink's reference volume to v (remapped from channel_map),
 * update the save flag, and propagate the new reference volume to all
 * volume-sharing filter sinks below s. */
2091 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
2093 bool reference_volume_changed;
2097 pa_sink_assert_ref(s);
2098 pa_assert(PA_SINK_IS_LINKED(s->state));
2100 pa_assert(channel_map);
2101 pa_assert(pa_cvolume_valid(v));
2104 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
2106 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
2107 pa_sink_set_reference_volume_direct(s, &volume);
/* Keep a previously-set save flag unless the volume actually changed. */
2109 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
2111 if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2112 /* If the root sink's volume doesn't change, then there can't be any
2113 * changes in the other sinks in the sink tree either.
2115 * It's probably theoretically possible that even if the root sink's
2116 * volume changes slightly, some filter sink doesn't change its volume
2117 * due to rounding errors. If that happens, we still want to propagate
2118 * the changed root sink volume to the sinks connected to the
2119 * intermediate sink that didn't change its volume. This theoretical
2120 * possibility is the reason why we have that !(s->flags &
2121 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2122 * notice even if we returned here false always if
2123 * reference_volume_changed is false. */
2126 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2127 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2128 && PA_SINK_IS_LINKED(i->origin_sink->state))
2129 update_reference_volume(i->origin_sink, v, channel_map, false);
2135 /* Called from main thread */
/* Set the sink's volume (or, when volume == NULL in flat volume mode,
 * resynchronize the sink's reference/real volumes from the stream
 * volumes). The change is applied on the root sink of a volume-sharing
 * tree and propagated down; finally the implementation's set_volume()
 * hook or the soft volume path is engaged and the IO thread is notified.
 * NOTE(review): some lines (returns/braces, a few declarations) are
 * elided in this excerpt; comments describe only the visible code. */
2136 void pa_sink_set_volume(
2138 const pa_cvolume *volume,
2142 pa_cvolume new_reference_volume;
2145 pa_sink_assert_ref(s);
2146 pa_assert_ctl_context();
2147 pa_assert(PA_SINK_IS_LINKED(s->state));
2148 pa_assert(!volume || pa_cvolume_valid(volume));
2149 pa_assert(volume || pa_sink_flat_volume_enabled(s));
2150 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2152 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2153 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2154 if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2155 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
2159 /* In case of volume sharing, the volume is set for the root sink first,
2160 * from which it's then propagated to the sharing sinks. */
2161 root_sink = pa_sink_get_master(s);
2163 if (PA_UNLIKELY(!root_sink))
2166 /* As a special exception we accept mono volumes on all sinks --
2167 * even on those with more complex channel maps */
2170 if (pa_cvolume_compatible(volume, &s->sample_spec))
2171 new_reference_volume = *volume;
/* Mono volume: scale the current reference volume to the requested level. */
2173 new_reference_volume = s->reference_volume;
2174 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2177 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2179 if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2180 if (pa_sink_flat_volume_enabled(root_sink)) {
2181 /* OK, propagate this volume change back to the inputs */
2182 propagate_reference_volume(root_sink);
2184 /* And now recalculate the real volume */
2185 compute_real_volume(root_sink);
2187 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2191 /* If volume is NULL we synchronize the sink's real and
2192 * reference volumes with the stream volumes. */
2194 pa_assert(pa_sink_flat_volume_enabled(root_sink));
2196 /* Ok, let's determine the new real volume */
2197 compute_real_volume(root_sink);
2199 /* Let's 'push' the reference volume if necessary */
2200 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2201 /* If the sink and its root don't have the same number of channels, we need to remap */
2202 if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2203 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2204 update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2206 /* Now that the reference volume is updated, we can update the streams'
2207 * reference ratios. */
2208 compute_reference_ratios(root_sink);
2211 if (root_sink->set_volume) {
2212 /* If we have a function set_volume(), then we do not apply a
2213 * soft volume by default. However, set_volume() is free to
2214 * apply one to root_sink->soft_volume */
2216 pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
2217 if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2218 root_sink->set_volume(root_sink);
2221 /* If we have no function set_volume(), then the soft volume
2222 * becomes the real volume */
2223 root_sink->soft_volume = root_sink->real_volume;
2225 /* This tells the sink that soft volume and/or real volume changed */
2227 pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2230 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2231 * Only to be called by sink implementor */
/* Set the sink's soft (software) volume; NULL resets it to PA_VOLUME_NORM.
 * For non-deferred-volume sinks the IO thread is told synchronously,
 * otherwise the thread-side copy is updated directly (we already are in
 * IO context then). */
2232 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2234 pa_sink_assert_ref(s);
2235 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* Context check depends on the volume mode: IO thread for deferred volume,
 * main (control) thread otherwise. */
2237 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2238 pa_sink_assert_io_context(s);
2240 pa_assert_ctl_context();
2243 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2245 s->soft_volume = *volume;
2247 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2248 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2250 s->thread_info.soft_volume = s->soft_volume;
2253 /* Called from the main thread. Only called for the root sink in volume sharing
2254 * cases, except for internal recursive calls. */
/* React to an externally-caused hardware volume change: adopt the new real
 * volume as the reference volume, rebuild stream volumes from the (stable)
 * real ratios, recurse into volume-sharing filter sinks, and mark the new
 * hw volume for saving since the user most likely caused it. */
2255 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2259 pa_sink_assert_ref(s);
2260 pa_assert(old_real_volume);
2261 pa_assert_ctl_context();
2262 pa_assert(PA_SINK_IS_LINKED(s->state));
2264 /* This is called when the hardware's real volume changes due to
2265 * some external event. We copy the real volume into our
2266 * reference volume and then rebuild the stream volumes based on
2267 * i->real_ratio which should stay fixed. */
2269 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2270 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2273 /* 1. Make the real volume the reference volume */
2274 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2277 if (pa_sink_flat_volume_enabled(s)) {
2279 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2280 pa_cvolume new_volume;
2282 /* 2. Since the sink's reference and real volumes are equal
2283 * now our ratios should be too. */
2284 pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2286 /* 3. Recalculate the new stream reference volume based on the
2287 * reference ratio and the sink's reference volume.
2289 * This basically calculates:
2291 * i->volume = s->reference_volume * i->reference_ratio
2293 * This is identical to propagate_reference_volume() */
2294 new_volume = s->reference_volume;
2295 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2296 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2297 pa_sink_input_set_volume_direct(i, &new_volume);
2299 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2300 && PA_SINK_IS_LINKED(i->origin_sink->state))
2301 propagate_real_volume(i->origin_sink, old_real_volume);
2305 /* Something got changed in the hardware. It probably makes sense
2306 * to save changed hw settings given that hw volume changes not
2307 * triggered by PA are almost certainly done by the user. */
2308 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2309 s->save_volume = true;
2312 /* Called from io thread */
2313 void pa_sink_update_volume_and_mute(pa_sink *s) {
/* Posts PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE to the main thread, which
 * will force-refresh both volume and mute (see pa_sink_process_msg). Fire
 * and forget: no reply is awaited. */
2315 pa_sink_assert_io_context(s);
2317 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2320 /* Called from main thread */
2321 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
/* Returns the sink's reference volume, optionally re-reading the hardware
 * volume first (when refresh_volume is set or force_refresh is true) and
 * propagating any detected change to the streams. */
2322 pa_sink_assert_ref(s);
2323 pa_assert_ctl_context();
2324 pa_assert(PA_SINK_IS_LINKED(s->state));
2326 if (s->refresh_volume || force_refresh) {
2327 struct pa_cvolume old_real_volume;
2329 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2331 old_real_volume = s->real_volume;
/* Deferred-volume sinks must be queried in the IO thread via
 * PA_SINK_MESSAGE_GET_VOLUME; otherwise get_volume() can be called
 * directly (NOTE(review): the direct-call line is not visible in this
 * view — confirm against the full file). */
2333 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2336 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2338 update_real_volume(s, &s->real_volume, &s->channel_map);
2339 propagate_real_volume(s, &old_real_volume);
2342 return &s->reference_volume;
2345 /* Called from main thread. In volume sharing cases, only the root sink may
2347 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
/* Entry point for sink implementors to report an externally changed
 * hardware volume: records the new real volume and propagates the change
 * (reference volume, stream volumes, notifications). */
2348 pa_cvolume old_real_volume;
2350 pa_sink_assert_ref(s);
2351 pa_assert_ctl_context();
2352 pa_assert(PA_SINK_IS_LINKED(s->state));
2353 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2355 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2357 old_real_volume = s->real_volume;
2358 update_real_volume(s, new_real_volume, &s->channel_map);
2359 propagate_real_volume(s, &old_real_volume);
2362 /* Called from main thread */
2363 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
/* Sets the sink's mute state. If unchanged, only the save flag is ORed in.
 * Otherwise: updates s->muted/save_muted, calls the implementor's
 * set_mute() (non-deferred case) with set_mute_in_progress guarding against
 * re-entrant pa_sink_mute_changed() calls, informs the IO thread, and fires
 * subscription event + hook. */
2366 pa_sink_assert_ref(s);
2367 pa_assert_ctl_context();
2369 old_muted = s->muted;
2371 if (mute == old_muted) {
2372 s->save_muted |= save;
2377 s->save_muted = save;
2379 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
/* Flag prevents pa_sink_mute_changed() (possibly triggered by
 * set_mute()) from recursing back into this function. */
2380 s->set_mute_in_progress = true;
2382 s->set_mute_in_progress = false;
/* For an unlinked sink there is no IO thread / listeners to notify yet. */
2385 if (!PA_SINK_IS_LINKED(s->state))
2388 pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
2389 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2390 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2391 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2394 /* Called from main thread */
2395 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
/* Returns the sink's mute state, optionally re-querying the hardware first.
 * Deferred-volume sinks are queried in the IO thread via
 * PA_SINK_MESSAGE_GET_MUTE, others via a direct get_mute() call; a detected
 * change is propagated through pa_sink_mute_changed(). */
2397 pa_sink_assert_ref(s);
2398 pa_assert_ctl_context();
2399 pa_assert(PA_SINK_IS_LINKED(s->state));
2401 if ((s->refresh_muted || force_refresh) && s->get_mute) {
2404 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2405 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2406 pa_sink_mute_changed(s, mute);
2408 if (s->get_mute(s, &mute) >= 0)
2409 pa_sink_mute_changed(s, mute);
2416 /* Called from main thread */
2417 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
/* Entry point for sink implementors to report an externally changed mute
 * state. Ignored while pa_sink_set_mute() is itself driving the change
 * (set_mute_in_progress), and when nothing actually changed; otherwise
 * delegates to pa_sink_set_mute() with save=true. */
2418 pa_sink_assert_ref(s);
2419 pa_assert_ctl_context();
2420 pa_assert(PA_SINK_IS_LINKED(s->state));
2422 if (s->set_mute_in_progress)
2425 /* pa_sink_set_mute() does this same check, so this may appear redundant,
2426 * but we must have this here also, because the save parameter of
2427 * pa_sink_set_mute() would otherwise have unintended side effects (saving
2428 * the mute state when it shouldn't be saved). */
2429 if (new_muted == s->muted)
2432 pa_sink_set_mute(s, new_muted, true);
2435 /* Called from main thread */
2436 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
/* Merges property list p into the sink's proplist according to mode and,
 * if the sink is linked, notifies listeners via the PROPLIST_CHANGED hook
 * and a subscription event. */
2437 pa_sink_assert_ref(s);
2438 pa_assert_ctl_context();
2441 pa_proplist_update(s->proplist, mode, p);
2443 if (PA_SINK_IS_LINKED(s->state)) {
2444 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2445 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2451 /* Called from main thread */
2452 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
2453 void pa_sink_set_description(pa_sink *s, const char *description) {
/* Updates PA_PROP_DEVICE_DESCRIPTION (setting or unsetting it), keeps the
 * monitor source's description in sync ("Monitor Source of <desc>"), and
 * notifies listeners when the sink is linked. Returns early when the
 * description is effectively unchanged. */
2455 pa_sink_assert_ref(s);
2456 pa_assert_ctl_context();
2458 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2461 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2463 if (old && description && pa_streq(old, description))
2467 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2469 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2471 if (s->monitor_source) {
2474 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2475 pa_source_set_description(s->monitor_source, n);
2479 if (PA_SINK_IS_LINKED(s->state)) {
2480 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2481 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2485 /* Called from main thread */
2486 unsigned pa_sink_linked_by(pa_sink *s) {
/* Returns the number of streams attached to this sink, counting both its
 * sink inputs and the outputs of its monitor source (unlike
 * pa_sink_used_by(), which ignores the monitor and corked streams). */
2489 pa_sink_assert_ref(s);
2490 pa_assert_ctl_context();
2491 pa_assert(PA_SINK_IS_LINKED(s->state));
2493 ret = pa_idxset_size(s->inputs);
2495 /* We add in the number of streams connected to us here. Please
2496 * note the asymmetry to pa_sink_used_by()! */
2498 if (s->monitor_source)
2499 ret += pa_source_linked_by(s->monitor_source);
2504 /* Called from main thread */
2505 unsigned pa_sink_used_by(pa_sink *s) {
/* Returns the number of actively playing (non-corked) sink inputs.
 * Monitor-source streams are deliberately not counted here. */
2508 pa_sink_assert_ref(s);
2509 pa_assert_ctl_context();
2510 pa_assert(PA_SINK_IS_LINKED(s->state));
2512 ret = pa_idxset_size(s->inputs);
2513 pa_assert(ret >= s->n_corked);
2515 /* Streams connected to our monitor source do not matter for
2516 * pa_sink_used_by()! */
2518 return ret - s->n_corked;
2521 /* Called from main thread */
2522 unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
/* Counts the streams that should keep this sink from auto-suspending:
 * linked, non-corked sink inputs without DONT_INHIBIT_AUTO_SUSPEND
 * (skipping ignore_input), plus the monitor source's equivalent count
 * (skipping ignore_output). Unlinked sinks report 0. */
2527 pa_sink_assert_ref(s);
2528 pa_assert_ctl_context();
2530 if (!PA_SINK_IS_LINKED(s->state))
2535 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2536 pa_sink_input_state_t st;
2538 if (i == ignore_input)
2541 st = pa_sink_input_get_state(i);
2543 /* We do not assert here. It is perfectly valid for a sink input to
2544 * be in the INIT state (i.e. created, marked done but not yet put)
2545 * and we should not care if it's unlinked as it won't contribute
2546 * towards our busy status.
2548 if (!PA_SINK_INPUT_IS_LINKED(st))
2551 if (st == PA_SINK_INPUT_CORKED)
2554 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2560 if (s->monitor_source)
2561 ret += pa_source_check_suspend(s->monitor_source, ignore_output)
/* Maps a pa_sink_state_t value to its human-readable name (for logging). */
2566 const char *pa_sink_state_to_string(pa_sink_state_t state) {
2568 case PA_SINK_INIT: return "INIT";
2569 case PA_SINK_IDLE: return "IDLE";
2570 case PA_SINK_RUNNING: return "RUNNING";
2571 case PA_SINK_SUSPENDED: return "SUSPENDED";
2572 case PA_SINK_UNLINKED: return "UNLINKED";
2573 case PA_SINK_INVALID_STATE: return "INVALID_STATE";
/* All enum values are handled above; reaching this point is a bug. */
2576 pa_assert_not_reached();
2579 /* Called from the IO thread */
2580 static void sync_input_volumes_within_thread(pa_sink *s) {
/* Copies each sink input's main-thread soft_volume into its thread_info
 * copy and requests a rewind so the new volume takes effect on already
 * rendered audio. Inputs whose volume is unchanged are skipped. */
2584 pa_sink_assert_ref(s);
2585 pa_sink_assert_io_context(s);
2587 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2588 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2591 i->thread_info.soft_volume = i->soft_volume;
2592 pa_sink_input_request_rewind(i, 0, true, false, false);
2596 /* Called from the IO thread. Only called for the root sink in volume sharing
2597 * cases, except for internal recursive calls. */
2598 static void set_shared_volume_within_thread(pa_sink *s) {
/* Applies a shared-volume update in the IO thread: processes
 * PA_SINK_MESSAGE_SET_VOLUME_SYNCED on this sink, then recurses into every
 * attached filter sink that shares volume with its master. */
2599 pa_sink_input *i = NULL;
2602 pa_sink_assert_ref(s);
2604 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2606 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2607 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2608 set_shared_volume_within_thread(i->origin_sink);
2612 /* Called from IO thread, except when it is not */
2613 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
/* Default message handler for sinks. Sink implementations chain up to this
 * for any message they do not handle themselves. Most messages run in the
 * IO thread; UPDATE_VOLUME_AND_MUTE is the exception and asserts the main
 * (control) thread context. Several volume cases deliberately fall through
 * (SET_VOLUME_SYNCED -> SET_VOLUME -> SYNC_VOLUMES). */
2614 pa_sink *s = PA_SINK(o);
2615 pa_sink_assert_ref(s);
2617 switch ((pa_sink_message_t) code) {
2619 case PA_SINK_MESSAGE_ADD_INPUT: {
2620 pa_sink_input *i = PA_SINK_INPUT(userdata);
2622 /* If you change anything here, make sure to change the
2623 * sink input handling a few lines down at
2624 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2626 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2628 /* Since the caller sleeps in pa_sink_input_put(), we can
2629 * safely access data outside of thread_info even though
/* Mirror the main-thread sync_prev/sync_next links into thread_info so
 * synchronized streams stay linked inside the IO thread too. */
2632 if ((i->thread_info.sync_prev = i->sync_prev)) {
2633 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2634 pa_assert(i->sync_prev->sync_next == i);
2635 i->thread_info.sync_prev->thread_info.sync_next = i;
2638 if ((i->thread_info.sync_next = i->sync_next)) {
2639 pa_assert(i->sink == i->thread_info.sync_next->sink);
2640 pa_assert(i->sync_next->sync_prev == i);
2641 i->thread_info.sync_next->thread_info.sync_prev = i;
2644 pa_sink_input_attach(i);
2646 pa_sink_input_set_state_within_thread(i, i->state);
2648 /* The requested latency of the sink input needs to be fixed up and
2649 * then configured on the sink. If this causes the sink latency to
2650 * go down, the sink implementor is responsible for doing a rewind
2651 * in the update_requested_latency() callback to ensure that the
2652 * sink buffer doesn't contain more data than what the new latency
2655 * XXX: Does it really make sense to push this responsibility to
2656 * the sink implementors? Wouldn't it be better to do it once in
2657 * the core than many times in the modules? */
2659 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2660 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2662 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2663 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2665 /* We don't rewind here automatically. This is left to the
2666 * sink input implementor because some sink inputs need a
2667 * slow start, i.e. need some time to buffer client
2668 * samples before beginning streaming.
2670 * XXX: Does it really make sense to push this functionality to
2671 * the sink implementors? Wouldn't it be better to do it once in
2672 * the core than many times in the modules? */
2674 /* In flat volume mode we need to update the volume as
2676 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2679 case PA_SINK_MESSAGE_REMOVE_INPUT: {
2680 pa_sink_input *i = PA_SINK_INPUT(userdata);
2682 /* If you change anything here, make sure to change the
2683 * sink input handling a few lines down at
2684 * PA_SINK_MESSAGE_START_MOVE, too. */
2686 pa_sink_input_detach(i);
2688 pa_sink_input_set_state_within_thread(i, i->state);
2690 /* Since the caller sleeps in pa_sink_input_unlink(),
2691 * we can safely access data outside of thread_info even
2692 * though it is mutable */
2694 pa_assert(!i->sync_prev);
2695 pa_assert(!i->sync_next);
/* Unlink this input from its thread_info sync chain (the main-thread
 * links were already cleared, as asserted above). */
2697 if (i->thread_info.sync_prev) {
2698 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2699 i->thread_info.sync_prev = NULL;
2702 if (i->thread_info.sync_next) {
2703 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2704 i->thread_info.sync_next = NULL;
2707 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2708 pa_sink_invalidate_requested_latency(s, true);
2709 pa_sink_request_rewind(s, (size_t) -1);
2711 /* In flat volume mode we need to update the volume as
2713 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2716 case PA_SINK_MESSAGE_START_MOVE: {
2717 pa_sink_input *i = PA_SINK_INPUT(userdata);
2719 /* We don't support moving synchronized streams. */
2720 pa_assert(!i->sync_prev);
2721 pa_assert(!i->sync_next);
2722 pa_assert(!i->thread_info.sync_next);
2723 pa_assert(!i->thread_info.sync_prev);
2725 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2727 size_t sink_nbytes, total_nbytes;
2729 /* The old sink probably has some audio from this
2730 * stream in its buffer. We want to "take it back" as
2731 * much as possible and play it to the new sink. We
2732 * don't know at this point how much the old sink can
2733 * rewind. We have to pick something, and that
2734 * something is the full latency of the old sink here.
2735 * So we rewind the stream buffer by the sink latency
2736 * amount, which may be more than what we should
2737 * rewind. This can result in a chunk of audio being
2738 * played both to the old sink and the new sink.
2740 * FIXME: Fix this code so that we don't have to make
2741 * guesses about how much the sink will actually be
2742 * able to rewind. If someone comes up with a solution
2743 * for this, something to note is that the part of the
2744 * latency that the old sink couldn't rewind should
2745 * ideally be compensated after the stream has moved
2746 * to the new sink by adding silence. The new sink
2747 * most likely can't start playing the moved stream
2748 * immediately, and that gap should be removed from
2749 * the "compensation silence" (at least at the time of
2750 * writing this, the move finish code will actually
2751 * already take care of dropping the new sink's
2752 * unrewindable latency, so taking into account the
2753 * unrewindable latency of the old sink is the only
2756 * The render_memblockq contents are discarded,
2757 * because when the sink changes, the format of the
2758 * audio stored in the render_memblockq may change
2759 * too, making the stored audio invalid. FIXME:
2760 * However, the read and write indices are moved back
2761 * the same amount, so if they are not the same now,
2762 * they won't be the same after the rewind either. If
2763 * the write index of the render_memblockq is ahead of
2764 * the read index, then the render_memblockq will feed
2765 * the new sink some silence first, which it shouldn't
2766 * do. The write index should be flushed to be the
2767 * same as the read index. */
2769 /* Get the latency of the sink */
2770 usec = pa_sink_get_latency_within_thread(s, false);
2771 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2772 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
2774 if (total_nbytes > 0) {
/* Ask the input to rewrite that many (resampler-adjusted) bytes and
 * flush its render queue, then process the rewind immediately. */
2775 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
2776 i->thread_info.rewrite_flush = true;
2777 pa_sink_input_process_rewind(i, sink_nbytes);
2781 pa_sink_input_detach(i);
2783 /* Let's remove the sink input ...*/
2784 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2786 pa_sink_invalidate_requested_latency(s, true);
2788 pa_log_debug("Requesting rewind due to started move");
2789 pa_sink_request_rewind(s, (size_t) -1);
2791 /* In flat volume mode we need to update the volume as
2793 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2796 case PA_SINK_MESSAGE_FINISH_MOVE: {
2797 pa_sink_input *i = PA_SINK_INPUT(userdata);
2799 /* We don't support moving synchronized streams. */
2800 pa_assert(!i->sync_prev);
2801 pa_assert(!i->sync_next);
2802 pa_assert(!i->thread_info.sync_next);
2803 pa_assert(!i->thread_info.sync_prev);
2805 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2807 pa_sink_input_attach(i);
2809 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2813 /* In the ideal case the new sink would start playing
2814 * the stream immediately. That requires the sink to
2815 * be able to rewind all of its latency, which usually
2816 * isn't possible, so there will probably be some gap
2817 * before the moved stream becomes audible. We then
2818 * have two possibilities: 1) start playing the stream
2819 * from where it is now, or 2) drop the unrewindable
2820 * latency of the sink from the stream. With option 1
2821 * we won't lose any audio but the stream will have a
2822 * pause. With option 2 we may lose some audio but the
2823 * stream time will be somewhat in sync with the wall
2824 * clock. Lennart seems to have chosen option 2 (one
2825 * of the reasons might have been that option 1 is
2826 * actually much harder to implement), so we drop the
2827 * latency of the new sink from the moved stream and
2828 * hope that the sink will undo most of that in the
2831 /* Get the latency of the sink */
2832 usec = pa_sink_get_latency_within_thread(s, false);
2833 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2836 pa_sink_input_drop(i, nbytes);
2838 pa_log_debug("Requesting rewind due to finished move");
2839 pa_sink_request_rewind(s, nbytes);
2842 /* Updating the requested sink latency has to be done
2843 * after the sink rewind request, not before, because
2844 * otherwise the sink may limit the rewind amount
2847 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2848 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2850 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2851 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2853 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2856 case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
/* Resolve the root of the volume-sharing tree and apply the shared
 * volume starting there. */
2857 pa_sink *root_sink = pa_sink_get_master(s);
2859 if (PA_LIKELY(root_sink))
2860 set_shared_volume_within_thread(root_sink);
2865 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2867 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2869 pa_sink_volume_change_push(s);
2871 /* Fall through ... */
2873 case PA_SINK_MESSAGE_SET_VOLUME:
2875 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2876 s->thread_info.soft_volume = s->soft_volume;
2877 pa_sink_request_rewind(s, (size_t) -1);
2880 /* Fall through ... */
2882 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2883 sync_input_volumes_within_thread(s);
2886 case PA_SINK_MESSAGE_GET_VOLUME:
2888 if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
/* Flush any still-pending deferred volume changes before reading, and
 * derive the current hardware volume from real/soft volume. */
2890 pa_sink_volume_change_flush(s);
2891 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2894 /* In case sink implementor reset SW volume. */
2895 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2896 s->thread_info.soft_volume = s->soft_volume;
2897 pa_sink_request_rewind(s, (size_t) -1);
2902 case PA_SINK_MESSAGE_SET_MUTE:
2904 if (s->thread_info.soft_muted != s->muted) {
2905 s->thread_info.soft_muted = s->muted;
2906 pa_sink_request_rewind(s, (size_t) -1);
2909 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2914 case PA_SINK_MESSAGE_GET_MUTE:
2916 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2917 return s->get_mute(s, userdata);
2921 case PA_SINK_MESSAGE_SET_STATE: {
/* True when transitioning into or out of SUSPENDED — only those
 * transitions are announced to the inputs below. */
2923 bool suspend_change =
2924 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2925 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2927 s->thread_info.state = PA_PTR_TO_UINT(userdata);
/* A suspended sink cannot execute a rewind; drop any pending request. */
2929 if (s->thread_info.state == PA_SINK_SUSPENDED) {
2930 s->thread_info.rewind_nbytes = 0;
2931 s->thread_info.rewind_requested = false;
2934 if (suspend_change) {
2938 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2939 if (i->suspend_within_thread)
2940 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2946 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2948 pa_usec_t *usec = userdata;
2949 *usec = pa_sink_get_requested_latency_within_thread(s);
2951 /* Yes, that's right, the IO thread will see -1 when no
2952 * explicit requested latency is configured, the main
2953 * thread will see max_latency */
2954 if (*usec == (pa_usec_t) -1)
2955 *usec = s->thread_info.max_latency;
2960 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2961 pa_usec_t *r = userdata;
2963 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2968 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2969 pa_usec_t *r = userdata;
2971 r[0] = s->thread_info.min_latency;
2972 r[1] = s->thread_info.max_latency;
2977 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2979 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2982 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2984 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2987 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2989 *((size_t*) userdata) = s->thread_info.max_rewind;
2992 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2994 *((size_t*) userdata) = s->thread_info.max_request;
2997 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2999 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
3002 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
3004 pa_sink_set_max_request_within_thread(s, (size_t) offset);
3007 case PA_SINK_MESSAGE_SET_PORT:
3009 pa_assert(userdata);
/* Result is passed back to the sender through msg_data->ret. */
3011 struct sink_message_set_port *msg_data = userdata;
3012 msg_data->ret = s->set_port(s, msg_data->port);
3016 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
3017 /* This message is sent from IO-thread and handled in main thread. */
3018 pa_assert_ctl_context();
3020 /* Make sure we're not messing with main thread when no longer linked */
3021 if (!PA_SINK_IS_LINKED(s->state))
3024 pa_sink_get_volume(s, true);
3025 pa_sink_get_mute(s, true);
3028 case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
3029 s->thread_info.port_latency_offset = offset;
/* GET_LATENCY and MAX are expected to be handled (or rejected) by the
 * sink implementation before chaining up to this default handler. */
3032 case PA_SINK_MESSAGE_GET_LATENCY:
3033 case PA_SINK_MESSAGE_MAX:
3040 /* Called from main thread */
3041 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
/* Applies pa_sink_suspend(suspend, cause) to every sink of the core.
 * NOTE(review): error accumulation across sinks is not fully visible in
 * this view — confirm the return-value semantics against the full file. */
3046 pa_core_assert_ref(c);
3047 pa_assert_ctl_context();
3048 pa_assert(cause != 0);
3050 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
3053 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
3060 /* Called from IO thread */
3061 void pa_sink_detach_within_thread(pa_sink *s) {
/* Detaches every attached sink input and the monitor source from the IO
 * thread (counterpart of pa_sink_attach_within_thread()). */
3065 pa_sink_assert_ref(s);
3066 pa_sink_assert_io_context(s);
3067 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3069 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3070 pa_sink_input_detach(i);
3072 if (s->monitor_source)
3073 pa_source_detach_within_thread(s->monitor_source);
3076 /* Called from IO thread */
3077 void pa_sink_attach_within_thread(pa_sink *s) {
/* Re-attaches every sink input and the monitor source to the IO thread
 * (counterpart of pa_sink_detach_within_thread()). */
3081 pa_sink_assert_ref(s);
3082 pa_sink_assert_io_context(s);
3083 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3085 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3086 pa_sink_input_attach(i);
3088 if (s->monitor_source)
3089 pa_source_attach_within_thread(s->monitor_source);
3092 /* Called from IO thread */
3093 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
/* Records a rewind request of nbytes ((size_t)-1 means "as much as
 * possible"), clamped to max_rewind. A smaller request than one already
 * pending is ignored. The implementor's request_rewind() callback, if any,
 * is invoked to act on it. */
3094 pa_sink_assert_ref(s);
3095 pa_sink_assert_io_context(s);
3096 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3098 if (nbytes == (size_t) -1)
3099 nbytes = s->thread_info.max_rewind;
3101 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
/* Keep the larger of the pending and the new request. */
3103 if (s->thread_info.rewind_requested &&
3104 nbytes <= s->thread_info.rewind_nbytes)
3107 s->thread_info.rewind_nbytes = nbytes;
3108 s->thread_info.rewind_requested = true;
3110 if (s->request_rewind)
3111 s->request_rewind(s);
3114 /* Called from IO thread */
3115 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
/* Computes the effective requested latency: for fixed-latency sinks the
 * clamped fixed latency; otherwise the minimum of all sink inputs' and the
 * monitor source's requests, clamped to [min_latency, max_latency], or
 * (pa_usec_t)-1 when nobody requested anything. The result is cached once
 * the sink is linked. */
3116 pa_usec_t result = (pa_usec_t) -1;
3119 pa_usec_t monitor_latency;
3121 pa_sink_assert_ref(s);
3122 pa_sink_assert_io_context(s);
3124 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3125 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
/* Use the cached value if it is still valid. */
3127 if (s->thread_info.requested_latency_valid)
3128 return s->thread_info.requested_latency;
3130 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3131 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3132 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3133 result = i->thread_info.requested_sink_latency;
3135 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3137 if (monitor_latency != (pa_usec_t) -1 &&
3138 (result == (pa_usec_t) -1 || result > monitor_latency))
3139 result = monitor_latency;
3141 if (result != (pa_usec_t) -1)
3142 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3144 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3145 /* Only cache if properly initialized */
3146 s->thread_info.requested_latency = result;
3147 s->thread_info.requested_latency_valid = true;
3153 /* Called from main thread */
3154 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
/* Main-thread view of the requested latency, obtained from the IO thread
 * via PA_SINK_MESSAGE_GET_REQUESTED_LATENCY (which substitutes max_latency
 * for "unset"). NOTE(review): the SUSPENDED early-return value is not
 * visible in this view — confirm against the full file. */
3157 pa_sink_assert_ref(s);
3158 pa_assert_ctl_context();
3159 pa_assert(PA_SINK_IS_LINKED(s->state));
3161 if (s->state == PA_SINK_SUSPENDED)
3164 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3169 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3170 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
/* Updates thread_info.max_rewind and propagates the new limit to all
 * attached sink inputs (when linked) and to the monitor source. No-op if
 * the value is unchanged. */
3174 pa_sink_assert_ref(s);
3175 pa_sink_assert_io_context(s);
3177 if (max_rewind == s->thread_info.max_rewind)
3180 s->thread_info.max_rewind = max_rewind;
3182 if (PA_SINK_IS_LINKED(s->thread_info.state))
3183 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3184 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3186 if (s->monitor_source)
3187 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3190 /* Called from main thread */
3191 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
/* Main-thread entry: for a linked sink the change is marshalled to the IO
 * thread (PA_SINK_MESSAGE_SET_MAX_REWIND); before linking it is applied
 * directly. */
3192 pa_sink_assert_ref(s);
3193 pa_assert_ctl_context();
3195 if (PA_SINK_IS_LINKED(s->state))
3196 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3198 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3201 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3202 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
/* Updates thread_info.max_request and propagates the new limit to all
 * attached sink inputs when the sink is linked. No-op if unchanged. */
3205 pa_sink_assert_ref(s);
3206 pa_sink_assert_io_context(s);
3208 if (max_request == s->thread_info.max_request)
3211 s->thread_info.max_request = max_request;
3213 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3216 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3217 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3221 /* Called from main thread */
3222 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
/* Main-thread entry: for a linked sink the change is marshalled to the IO
 * thread (PA_SINK_MESSAGE_SET_MAX_REQUEST); before linking it is applied
 * directly. */
3223 pa_sink_assert_ref(s);
3224 pa_assert_ctl_context();
3226 if (PA_SINK_IS_LINKED(s->state))
3227 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3229 pa_sink_set_max_request_within_thread(s, max_request);
3232 /* Called from IO thread */
3233 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
/* Marks the cached requested latency stale (dynamic-latency sinks only)
 * and, when linked, notifies the sink's update_requested_latency() callback
 * and every input's update_sink_requested_latency() callback.
 * NOTE(review): the role of the `dynamic` parameter is not fully visible in
 * this view — confirm against the full file. */
3237 pa_sink_assert_ref(s);
3238 pa_sink_assert_io_context(s);
3240 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3241 s->thread_info.requested_latency_valid = false;
3245 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3247 if (s->update_requested_latency)
3248 s->update_requested_latency(s);
3250 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3251 if (i->update_sink_requested_latency)
3252 i->update_sink_requested_latency(i);
3256 /* Called from main thread */
3257 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
/* Sets the sink's dynamic latency range, clamping both ends to the
 * ABSOLUTE_MIN/MAX_LATENCY bounds. A linked sink gets the range via
 * PA_SINK_MESSAGE_SET_LATENCY_RANGE; otherwise it is applied directly. */
3258 pa_sink_assert_ref(s);
3259 pa_assert_ctl_context();
3261 /* min_latency == 0: no limit
3262 * min_latency anything else: specified limit
3264 * Similar for max_latency */
3266 if (min_latency < ABSOLUTE_MIN_LATENCY)
3267 min_latency = ABSOLUTE_MIN_LATENCY;
3269 if (max_latency <= 0 ||
3270 max_latency > ABSOLUTE_MAX_LATENCY)
3271 max_latency = ABSOLUTE_MAX_LATENCY;
3273 pa_assert(min_latency <= max_latency);
3275 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3276 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3277 max_latency == ABSOLUTE_MAX_LATENCY) ||
3278 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3280 if (PA_SINK_IS_LINKED(s->state)) {
3286 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3288 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3291 /* Called from main thread */
3292 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
/* Reads the sink's latency range into *min_latency/*max_latency. For a
 * linked sink the values are fetched from the IO thread via
 * PA_SINK_MESSAGE_GET_LATENCY_RANGE; otherwise thread_info is read
 * directly. */
3293 pa_sink_assert_ref(s);
3294 pa_assert_ctl_context();
3295 pa_assert(min_latency);
3296 pa_assert(max_latency);
3298 if (PA_SINK_IS_LINKED(s->state)) {
3299 pa_usec_t r[2] = { 0, 0 };
3301 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3303 *min_latency = r[0];
3304 *max_latency = r[1];
3306 *min_latency = s->thread_info.min_latency;
3307 *max_latency = s->thread_info.max_latency;
3311 /* Called from IO thread */
3312 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
/* IO-thread side of pa_sink_set_latency_range(): stores the (already
 * validated) range, notifies each input's update_sink_latency_range()
 * callback, invalidates the cached requested latency, and forwards the
 * range to the monitor source. No-op if the range is unchanged. */
3313 pa_sink_assert_ref(s);
3314 pa_sink_assert_io_context(s);
3316 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3317 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3318 pa_assert(min_latency <= max_latency);
3320 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3321 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3322 max_latency == ABSOLUTE_MAX_LATENCY) ||
3323 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3325 if (s->thread_info.min_latency == min_latency &&
3326 s->thread_info.max_latency == max_latency)
3329 s->thread_info.min_latency = min_latency;
3330 s->thread_info.max_latency = max_latency;
3332 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3336 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3337 if (i->update_sink_latency_range)
3338 i->update_sink_latency_range(i);
3341 pa_sink_invalidate_requested_latency(s, false);
3343 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3346 /* Called from main thread */
3347 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
/* Sets the fixed latency of a non-dynamic-latency sink (dynamic sinks must
 * pass 0). The value is clamped to the absolute bounds, sent to the IO
 * thread when linked (or stored directly otherwise), and mirrored onto the
 * monitor source. */
3348 pa_sink_assert_ref(s);
3349 pa_assert_ctl_context();
3351 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3352 pa_assert(latency == 0);
3356 if (latency < ABSOLUTE_MIN_LATENCY)
3357 latency = ABSOLUTE_MIN_LATENCY;
3359 if (latency > ABSOLUTE_MAX_LATENCY)
3360 latency = ABSOLUTE_MAX_LATENCY;
3362 if (PA_SINK_IS_LINKED(s->state))
3363 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3365 s->thread_info.fixed_latency = latency;
3367 pa_source_set_fixed_latency(s->monitor_source, latency);
3370 /* Called from main thread */
/* Return the sink's fixed latency in usec. For dynamic-latency sinks there is
 * no fixed latency (presumably 0 is returned on that branch — the early return
 * is not visible here; TODO confirm). When linked, the value is fetched from
 * the IO thread via an async message; otherwise thread_info is read directly. */
3371 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3374 pa_sink_assert_ref(s);
3375 pa_assert_ctl_context();
3377 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3380 if (PA_SINK_IS_LINKED(s->state))
3381 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
/* Not linked: no IO thread yet, reading thread_info directly is safe. */
3383 latency = s->thread_info.fixed_latency;
3388 /* Called from IO thread */
/* IO-thread counterpart of pa_sink_set_fixed_latency(): store the new fixed
 * latency, notify attached inputs, invalidate the requested latency, and
 * propagate the value to the monitor source. */
3389 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3390 pa_sink_assert_ref(s);
3391 pa_sink_assert_io_context(s);
3393 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
/* Dynamic-latency sinks must have a fixed latency of 0. */
3394 pa_assert(latency == 0);
3395 s->thread_info.fixed_latency = 0;
3397 if (s->monitor_source)
3398 pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
/* Unlike the main-thread setter, out-of-range values are a programming
 * error here, not something to clamp. */
3403 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3404 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3406 if (s->thread_info.fixed_latency == latency)
3409 s->thread_info.fixed_latency = latency;
3411 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
/* Let every attached input react to the new fixed latency. */
3415 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3416 if (i->update_sink_fixed_latency)
3417 i->update_sink_fixed_latency(i);
3420 pa_sink_invalidate_requested_latency(s, false);
3422 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3425 /* Called from main context */
/* Update the latency offset (usec, signed) associated with the active port.
 * The main-thread copy is written immediately; the IO-thread copy is updated
 * via an async message when the sink is linked. Fires the
 * SINK_PORT_LATENCY_OFFSET_CHANGED hook so listeners can react. */
3426 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3427 pa_sink_assert_ref(s);
3429 s->port_latency_offset = offset;
3431 if (PA_SINK_IS_LINKED(s->state))
3432 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
/* Not linked: no IO thread running, so thread_info may be written directly. */
3434 s->thread_info.port_latency_offset = offset;
3436 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3439 /* Called from main context */
/* Return the maximum number of bytes the sink can rewind. When linked, the
 * value is queried from the IO thread via an async message; otherwise the
 * thread_info copy can be read directly because no IO thread runs yet. */
3440 size_t pa_sink_get_max_rewind(pa_sink *s) {
3442 pa_assert_ctl_context();
3443 pa_sink_assert_ref(s);
3445 if (!PA_SINK_IS_LINKED(s->state))
3446 return s->thread_info.max_rewind;
3448 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3453 /* Called from main context */
/* Return the maximum number of bytes the sink may request at once. Mirrors
 * pa_sink_get_max_rewind(): direct thread_info read when not linked, async
 * message round-trip to the IO thread when linked. */
3454 size_t pa_sink_get_max_request(pa_sink *s) {
3456 pa_sink_assert_ref(s);
3457 pa_assert_ctl_context();
3459 if (!PA_SINK_IS_LINKED(s->state))
3460 return s->thread_info.max_request;
3462 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3467 /* Called from main context */
3468 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3469 pa_device_port *port;
3472 pa_sink_assert_ref(s);
3473 pa_assert_ctl_context();
3476 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3477 return -PA_ERR_NOTIMPLEMENTED;
3481 return -PA_ERR_NOENTITY;
3483 if (!(port = pa_hashmap_get(s->ports, name)))
3484 return -PA_ERR_NOENTITY;
3486 if (s->active_port == port) {
3487 s->save_port = s->save_port || save;
3491 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
3492 struct sink_message_set_port msg = { .port = port, .ret = 0 };
3493 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
3497 ret = s->set_port(s, port);
3500 return -PA_ERR_NOENTITY;
3502 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3504 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3506 s->active_port = port;
3507 s->save_port = save;
3509 pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3511 /* The active port affects the default sink selection. */
3512 pa_core_update_default_sink(s->core);
3514 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
/* Fill in PA_PROP_DEVICE_ICON_NAME on proplist 'p' if it is not set yet,
 * deriving an icon name from form factor, device class, profile name and bus.
 * 'is_sink' selects the output-vs-input fallback icon. Returns whether the
 * property ends up present (exact return paths are partly outside this view). */
3519 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3520 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
/* Respect an icon name that was already set by the driver or a module. */
3524 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3527 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3529 if (pa_streq(ff, "microphone"))
3530 t = "audio-input-microphone";
3531 else if (pa_streq(ff, "webcam"))
3533 else if (pa_streq(ff, "computer"))
3535 else if (pa_streq(ff, "handset"))
3537 else if (pa_streq(ff, "portable"))
3538 t = "multimedia-player";
3539 else if (pa_streq(ff, "tv"))
3540 t = "video-display";
3543 * The following icons are not part of the icon naming spec;
3544 * see the linked discussion for the background on why they were
3546 * rejected: http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3548 else if (pa_streq(ff, "headset"))
3549 t = "audio-headset";
3550 else if (pa_streq(ff, "headphone"))
3551 t = "audio-headphones";
3552 else if (pa_streq(ff, "speaker"))
3553 t = "audio-speakers";
3554 else if (pa_streq(ff, "hands-free"))
3555 t = "audio-handsfree";
/* Device class can override/refine the form-factor choice. */
3559 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3560 if (pa_streq(c, "modem"))
3567 t = "audio-input-microphone";
/* The profile name contributes a suffix ('s') for analog/iec958/hdmi
 * variants — the assignments are elided from this view. */
3570 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3571 if (strstr(profile, "analog"))
3573 else if (strstr(profile, "iec958"))
3575 else if (strstr(profile, "hdmi"))
/* Final icon name: base + profile suffix + "-" + bus (bus part optional). */
3579 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3581 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
/* Fill in PA_PROP_DEVICE_DESCRIPTION on 'p' if not already set, preferring the
 * owning card's description, then well-known form factors/classes, then the
 * product name. When a profile description exists it is appended after the
 * device description. Returns whether a description was set (return paths
 * partly outside this view). */
3586 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3587 const char *s, *d = NULL, *k;
/* Respect a description already provided by the driver or a module. */
3590 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
/* Prefer the card's own human-readable description when available. */
3594 if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3598 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3599 if (pa_streq(s, "internal"))
3600 d = _("Built-in Audio");
3603 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3604 if (pa_streq(s, "modem"))
/* Last resort: fall back to the raw product name. */
3608 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3613 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
/* "<device> <profile>" when a profile description exists, plain otherwise. */
3616 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3618 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
/* Fill in PA_PROP_DEVICE_INTENDED_ROLES on 'p' if not already set: telephony
 * form factors (handset, hands-free, headset) are tagged with the "phone"
 * role. Returns whether the property was set (return paths partly outside
 * this view). */
3623 bool pa_device_init_intended_roles(pa_proplist *p) {
/* Respect roles already provided by the driver or a module. */
3627 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3630 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3631 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3632 || pa_streq(s, "headset")) {
3633 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
/* Compute a heuristic priority for a device from its proplist: device class,
 * form factor, bus and profile name each contribute to the score (the actual
 * increments are elided from this view). Higher values make the device a more
 * attractive default. */
3640 unsigned pa_device_init_priority(pa_proplist *p) {
3642 unsigned priority = 0;
3646 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3648 if (pa_streq(s, "sound"))
/* Everything that is not a modem (and not "sound") gets some credit. */
3650 else if (!pa_streq(s, "modem"))
3654 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3656 if (pa_streq(s, "headphone"))
3658 else if (pa_streq(s, "hifi"))
3660 else if (pa_streq(s, "speaker"))
3662 else if (pa_streq(s, "portable"))
3666 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3668 if (pa_streq(s, "bluetooth"))
3670 else if (pa_streq(s, "usb"))
3672 else if (pa_streq(s, "pci"))
3676 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3678 if (pa_startswith(s, "analog-"))
3680 else if (pa_startswith(s, "iec958-"))
/* Lock-free free-list used to recycle pa_sink_volume_change entries so the IO
 * thread normally avoids heap allocation. */
3687 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3689 /* Called from the IO thread. */
/* Allocate a volume-change entry: pop one from the free list if possible,
 * otherwise fall back to a fresh heap allocation. The entry's list links are
 * initialized and its hw_volume reset to match the sink's channel count. */
3690 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3691 pa_sink_volume_change *c;
3692 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3693 c = pa_xnew(pa_sink_volume_change, 1);
3695 PA_LLIST_INIT(pa_sink_volume_change, c);
3697 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3701 /* Called from the IO thread. */
/* Return a volume-change entry to the free list; when the list is full
 * (push fails) the entry is released to the heap instead. */
3702 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3704 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3708 /* Called from the IO thread. */
/* Queue a deferred hardware volume change so it takes effect when the audio
 * currently in the latency pipeline has actually played. The target HW volume
 * is derived as real_volume / soft_volume. Changes that increase the volume
 * are scheduled a safety margin late, decreases a safety margin early, so a
 * glitch never plays louder than intended. Entries queued after the new
 * change's deadline are dropped as superseded. */
3709 void pa_sink_volume_change_push(pa_sink *s) {
3710 pa_sink_volume_change *c = NULL;
3711 pa_sink_volume_change *nc = NULL;
3712 pa_sink_volume_change *pc = NULL;
3713 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3715 const char *direction = NULL;
3718 nc = pa_sink_volume_change_new(s);
3720 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3721 * Adding one more volume for HW would get us rid of this, but I am trying
3722 * to survive with the ones we already have. */
3723 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
/* Nothing queued and the HW volume is already at the target: drop the entry. */
3725 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3726 pa_log_debug("Volume not changing");
3727 pa_sink_volume_change_free(nc);
/* Deadline = now + current sink latency + configured extra delay. */
3731 nc->at = pa_sink_get_latency_within_thread(s, false);
3732 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3734 if (s->thread_info.volume_changes_tail) {
/* Walk the queue tail-to-head to find the insertion point relative to
 * already-scheduled changes. */
3735 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3736 /* If volume is going up let's do it a bit late. If it is going
3737 * down let's do it a bit early. */
3738 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3739 if (nc->at + safety_margin > c->at) {
3740 nc->at += safety_margin;
3745 else if (nc->at - safety_margin > c->at) {
3746 nc->at -= safety_margin;
/* Empty queue: apply the margin relative to the current HW volume. */
3754 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3755 nc->at += safety_margin;
3758 nc->at -= safety_margin;
3761 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3764 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3767 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3769 /* We can ignore volume events that came earlier but should happen later than this. */
3770 PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
3771 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3772 pa_sink_volume_change_free(c);
/* The new entry is now the last scheduled change. */
3775 s->thread_info.volume_changes_tail = nc;
3778 /* Called from the IO thread. */
/* Discard all queued volume changes without applying them, freeing every
 * entry back to the free list. Head and tail pointers are cleared first so
 * the queue is consistent even while entries are being released. */
3779 static void pa_sink_volume_change_flush(pa_sink *s) {
3780 pa_sink_volume_change *c = s->thread_info.volume_changes;
3782 s->thread_info.volume_changes = NULL;
3783 s->thread_info.volume_changes_tail = NULL;
/* 'next' is saved before freeing because freeing invalidates 'c'. */
3785 pa_sink_volume_change *next = c->next;
3786 pa_sink_volume_change_free(c);
3791 /* Called from the IO thread. */
/* Apply every queued volume change whose deadline has passed by updating
 * current_hw_volume (the driver's write_volume callback performs the actual
 * hardware write — elided from this view). When 'usec_to_next' is non-NULL it
 * receives the time until the next pending change, if any. The return value
 * reports whether anything is left to do (exact semantics partly outside this
 * view). */
3792 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
/* Nothing queued, or the sink is no longer linked: bail out early. */
3798 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
/* Deferred volume requires a driver-provided write_volume callback. */
3804 pa_assert(s->write_volume);
3806 now = pa_rtclock_now();
/* Pop and apply every change whose scheduled time is already due. */
3808 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3809 pa_sink_volume_change *c = s->thread_info.volume_changes;
3810 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3811 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3812 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3814 s->thread_info.current_hw_volume = c->hw_volume;
3815 pa_sink_volume_change_free(c);
3821 if (s->thread_info.volume_changes) {
3823 *usec_to_next = s->thread_info.volume_changes->at - now;
3824 if (pa_log_ratelimit(PA_LOG_DEBUG))
3825 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
/* Queue drained: also reset the tail pointer. */
3830 s->thread_info.volume_changes_tail = NULL;
3835 /* Called from the IO thread. */
3836 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3837 /* All the queued volume events later than current latency are shifted to happen earlier. */
/* 'nbytes' is the amount rewound; its usec equivalent ('rewound') is computed
 * here although the shift itself is partly elided from this view. */
3838 pa_sink_volume_change *c;
3839 pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3840 pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3841 pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
3843 pa_log_debug("latency = %lld", (long long) limit);
3844 limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3846 PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3847 pa_usec_t modified_limit = limit;
/* Apply the same up-late/down-early safety margin used when queueing. */
3848 if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3849 modified_limit -= s->thread_info.volume_change_safety_margin;
3851 modified_limit += s->thread_info.volume_change_safety_margin;
3852 if (c->at > modified_limit) {
/* Never move an event earlier than its (margin-adjusted) limit. */
3854 if (c->at < modified_limit)
3855 c->at = modified_limit;
3857 prev_vol = pa_cvolume_avg(&c->hw_volume);
/* Changes that became due by the shift are applied immediately. */
3859 pa_sink_volume_change_apply(s, NULL);
3862 /* Called from the main thread */
3863 /* Gets the list of formats supported by the sink. The members and idxset must
3864 * be freed by the caller. */
3865 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3870 if (s->get_formats) {
3871 /* Sink supports format query, all is good */
3872 ret = s->get_formats(s);
3874 /* Sink doesn't support format query, so assume it does PCM */
3875 pa_format_info *f = pa_format_info_new();
3876 f->encoding = PA_ENCODING_PCM;
/* Single-element set owned by the caller (keys/compare not needed). */
3878 ret = pa_idxset_new(NULL, NULL);
3879 pa_idxset_put(ret, f, NULL);
3885 /* Called from the main thread */
3886 /* Allows an external source to set what formats a sink supports if the sink
3887 * permits this. The function makes a copy of the formats on success. */
3888 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3893 /* Sink supports setting formats -- let's give it a shot */
3894 return s->set_formats(s, formats);
3896 /* Sink doesn't support setting this -- bail out */
3900 /* Called from the main thread */
3901 /* Checks if the sink can accept this format */
/* Queries the sink's supported formats and tests 'f' for compatibility
 * against each; the set (and its members) is freed before returning. */
3902 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3903 pa_idxset *formats = NULL;
3909 formats = pa_sink_get_formats(s);
3912 pa_format_info *finfo_device;
3915 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3916 if (pa_format_info_is_compatible(finfo_device, f)) {
/* pa_sink_get_formats() transferred ownership; release set and members. */
3922 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3928 /* Called from the main thread */
3929 /* Calculates the intersection between formats supported by the sink and
3930 * in_formats, and returns these, in the order of the sink's formats. */
/* The returned idxset contains copies of the matching in_formats entries;
 * ownership of the set and its members passes to the caller. */
3931 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3932 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3933 pa_format_info *f_sink, *f_in;
/* Empty/NULL input: the intersection is trivially empty. */
3938 if (!in_formats || pa_idxset_isempty(in_formats))
3941 sink_formats = pa_sink_get_formats(s);
/* Outer loop over sink formats preserves the sink's preference order. */
3943 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3944 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3945 if (pa_format_info_is_compatible(f_sink, f_in))
3946 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
/* pa_sink_get_formats() transferred ownership; release set and members. */
3952 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3957 /* Called from the main thread. */
/* Set s->reference_volume directly (no driver round-trip), logging the old
 * and new values, posting a change subscription event and firing the
 * SINK_VOLUME_CHANGED hook. No-op when the volume is unchanged. */
3958 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
3959 pa_cvolume old_volume;
3960 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3961 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3966 old_volume = s->reference_volume;
/* Unchanged volume: avoid spurious events and hook invocations. */
3968 if (pa_cvolume_equal(volume, &old_volume))
3971 s->reference_volume = *volume;
3972 pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
3973 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
3974 s->flags & PA_SINK_DECIBEL_VOLUME),
3975 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
3976 s->flags & PA_SINK_DECIBEL_VOLUME));
3978 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3979 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);