2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
32 #include <pulse/introspect.h>
33 #include <pulse/format.h>
34 #include <pulse/utf8.h>
35 #include <pulse/xmalloc.h>
36 #include <pulse/timeval.h>
37 #include <pulse/util.h>
38 #include <pulse/rtclock.h>
39 #include <pulse/internal.h>
41 #include <pulsecore/i18n.h>
42 #include <pulsecore/sink-input.h>
43 #include <pulsecore/namereg.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/mix.h>
47 #include <pulsecore/core-subscribe.h>
48 #include <pulsecore/log.h>
49 #include <pulsecore/macro.h>
50 #include <pulsecore/play-memblockq.h>
51 #include <pulsecore/flist.h>
55 #define MAX_MIX_CHANNELS 32
56 #define MIX_BUFFER_LENGTH (pa_page_size())
57 #define ABSOLUTE_MIN_LATENCY (500)
58 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
59 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
61 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
/* One queued hardware volume change for sinks using deferred volume
 * (PA_SINK_DEFERRED_VOLUME). Entries live in the per-sink linked list
 * s->thread_info.volume_changes. NOTE(review): the value members of this
 * struct are elided in this excerpt; only the list linkage is visible. */
63 struct pa_sink_volume_change {
67     PA_LLIST_FIELDS(pa_sink_volume_change);
/* Payload for the PA_SINK_MESSAGE_SET_STATE message sent from the main
 * thread to the IO thread (see sink_set_state()): the target state plus
 * the suspend cause bitmask that should accompany it. */
70 struct set_state_data {
71     pa_sink_state_t state;
72     pa_suspend_cause_t suspend_cause;
/* Forward declarations for file-local helpers: the object destructor and
 * the deferred-volume bookkeeping used by PA_SINK_DEFERRED_VOLUME sinks. */
75 static void sink_free(pa_object *s);
77 static void pa_sink_volume_change_push(pa_sink *s);
78 static void pa_sink_volume_change_flush(pa_sink *s);
79 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/* (Tizen PCM-dump extension) Write the rendered audio in 'chunk' to a raw
 * dump file when PA_PCM_DUMP_SINK is enabled in core->pcm_dump. Lazily
 * opens the dump file on the first call while the sink is RUNNING, and
 * closes it again when the dump configuration bit is cleared.
 * NOTE(review): several interior lines (local declarations such as the
 * timeval/tm/datetime buffers, closing braces, and some else branches) are
 * elided in this excerpt. */
82 static void pa_sink_write_pcm_dump(pa_sink *s, pa_memchunk *chunk)
84     char *dump_time = NULL, *dump_path_surfix = NULL;
85     const char *s_device_api_str, *card_name_str, *device_idx_str;
90     /* open file for dump pcm */
91     if (s->core->pcm_dump & PA_PCM_DUMP_SINK && !s->pcm_dump_fp && s->state == PA_SINK_RUNNING) {
/* Build a HHMMSS.mmm timestamp for the dump file name. */
92         pa_gettimeofday(&now);
93         localtime_r(&now.tv_sec, &tm);
94         memset(&datetime[0], 0x00, sizeof(datetime));
95         strftime(&datetime[0], sizeof(datetime), "%H%M%S", &tm);
96         dump_time = pa_sprintf_malloc("%s.%03ld", &datetime[0], now.tv_usec / 1000);
/* Pick a file-name suffix: "card_name.device" for ALSA sinks, the device
 * API string for other APIs, or the sink name as a fallback. */
98         if ((s_device_api_str = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_API))) {
99             if (pa_streq(s_device_api_str, "alsa")) {
100                 card_name_str = pa_proplist_gets(s->proplist, "alsa.card_name");
101                 device_idx_str = pa_proplist_gets(s->proplist, "alsa.device");
102                 dump_path_surfix = pa_sprintf_malloc("%s.%s", pa_strnull(card_name_str), pa_strnull(device_idx_str));
104                 dump_path_surfix = pa_sprintf_malloc("%s", s_device_api_str);
107             dump_path_surfix = pa_sprintf_malloc("%s", s->name);
110         s->dump_path = pa_sprintf_malloc("%s_%s_pa-sink%d-%s_%dch_%d.raw", PA_PCM_DUMP_PATH_PREFIX, pa_strempty(dump_time),
111                                          s->index, pa_strempty(dump_path_surfix), s->sample_spec.channels, s->sample_spec.rate);
113         s->pcm_dump_fp = fopen(s->dump_path, "w");
115             pa_log_warn("%s open failed", s->dump_path);
117             pa_log_info("%s opened", s->dump_path);
/* NOTE(review): dump_time is freed here too, presumably on an elided line
 * next to this pa_xfree — confirm against the full source, otherwise the
 * pa_sprintf_malloc() result above leaks on every open. */
120         pa_xfree(dump_path_surfix);
121     /* close file for dump pcm when config is changed */
/* '~x & FLAG' tests that FLAG is *cleared* in pcm_dump (bitwise-NOT, not
 * logical-NOT): close the dump file once the sink-dump bit is disabled. */
122     } else if (~s->core->pcm_dump & PA_PCM_DUMP_SINK && s->pcm_dump_fp) {
123         fclose(s->pcm_dump_fp);
124         pa_log_info("%s closed", s->dump_path);
125         pa_xfree(s->dump_path);
126         s->pcm_dump_fp = NULL;
/* Append the chunk's payload (offset by chunk->index) to the open file. */
130     if (s->pcm_dump_fp) {
133         ptr = pa_memblock_acquire(chunk->memblock);
135             fwrite((uint8_t *)ptr + chunk->index, 1, chunk->length, s->pcm_dump_fp);
137             pa_log_warn("pa_memblock_acquire is failed. ptr is NULL");
139         pa_memblock_release(chunk->memblock);
/* Initialize a pa_sink_new_data structure for pa_sink_new(): allocates a
 * fresh proplist and an (initially empty) port hashmap whose values are
 * released with pa_device_port_unref. Returns 'data' for chaining. */
144 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
148     data->proplist = pa_proplist_new();
149     data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
/* Set (replace) the sink name in the new-data struct; takes a copy and
 * frees any previously set name. */
154 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
157     pa_xfree(data->name);
158     data->name = pa_xstrdup(name);
/* Set the sample spec; passing NULL marks it as unset (the assignment to
 * sample_spec_is_set doubles as the NULL check). */
161 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
164     if ((data->sample_spec_is_set = !!spec))
165         data->sample_spec = *spec;
/* Set the channel map; passing NULL marks it as unset. */
168 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
171     if ((data->channel_map_is_set = !!map))
172         data->channel_map = *map;
/* Set the alternate sample rate (used when switching rates to avoid
 * resampling); always marks the field as set. */
175 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
178     data->alternate_sample_rate_is_set = true;
179     data->alternate_sample_rate = alternate_sample_rate;
/* Set the initial volume; passing NULL marks it as unset. */
182 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
185     if ((data->volume_is_set = !!volume))
186         data->volume = *volume;
/* Set the initial mute state and mark it as explicitly set.
 * NOTE(review): the line assigning data->muted itself is elided here. */
189 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
192     data->muted_is_set = true;
/* Set (replace) the requested active port name; takes a copy and frees any
 * previously set value. */
196 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
199     pa_xfree(data->active_port);
200     data->active_port = pa_xstrdup(port);
/* Release everything owned by a pa_sink_new_data struct: proplist, port
 * hashmap (unrefs each port via the hashmap's free callback), name and
 * active-port strings. Does not free 'data' itself. */
203 void pa_sink_new_data_done(pa_sink_new_data *data) {
206     pa_proplist_free(data->proplist);
209         pa_hashmap_free(data->ports);
211     pa_xfree(data->name);
212     pa_xfree(data->active_port);
215 /* Called from main context */
/* Clear every implementor-supplied callback on the sink, returning it to a
 * pristine state (used during initialization/teardown). NOTE(review): a few
 * callback fields, e.g. set_mute/get_mute, are on elided lines. */
216 static void reset_callbacks(pa_sink *s) {
219     s->set_state_in_main_thread = NULL;
220     s->set_state_in_io_thread = NULL;
221     s->get_volume = NULL;
222     s->set_volume = NULL;
223     s->write_volume = NULL;
226     s->request_rewind = NULL;
227     s->update_requested_latency = NULL;
229     s->get_formats = NULL;
230     s->set_formats = NULL;
231     s->reconfigure = NULL;
234 /* Called from main context */
/* Create and register a new sink from the fully-filled-in 'data':
 *   1. register the name and fire SINK_NEW / SINK_FIXATE hooks (either may
 *      veto creation),
 *   2. validate sample spec, channel map and volume,
 *   3. copy all settings into the freshly allocated pa_sink and initialize
 *      both main-thread and thread_info (IO-thread shadow) state,
 *   4. create the paired monitor source ("<name>.monitor").
 * Returns the new sink in PA_SINK_INIT state, or NULL on failure.
 * NOTE(review): many interior lines (error-path frees, some else branches
 * and closing braces) are elided in this excerpt. */
235 pa_sink* pa_sink_new(
237         pa_sink_new_data *data,
238         pa_sink_flags_t flags) {
242     char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
243     pa_source_new_data source_data;
249     pa_assert(data->name);
250     pa_assert_ctl_context();
252     s = pa_msgobject_new(pa_sink);
/* Reserve the sink name; fails if the (possibly mangled) name clashes. */
254     if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
255         pa_log_debug("Failed to register name %s.", data->name);
260     pa_sink_new_data_set_name(data, name);
/* Modules hooked on SINK_NEW may modify 'data' or veto the sink. */
262     if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
264         pa_namereg_unregister(core, name);
268     /* FIXME, need to free s here on failure */
270     pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
271     pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
273     pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
275     if (!data->channel_map_is_set)
276         pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
278     pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
279     pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
281     /* FIXME: There should probably be a general function for checking whether
282      * the sink volume is allowed to be set, like there is for sink inputs. */
283     pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
285     if (!data->volume_is_set) {
286         pa_cvolume_reset(&data->volume, data->sample_spec.channels);
287         data->save_volume = false;
290     pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
291     pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
293     if (!data->muted_is_set)
/* Inherit card properties and fill in description/icon/role defaults. */
297         pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
299     pa_device_init_description(data->proplist, data->card);
300     pa_device_init_icon(data->proplist, true);
301     pa_device_init_intended_roles(data->proplist);
303     if (!data->active_port) {
304         pa_device_port *p = pa_device_port_find_best(data->ports);
306             pa_sink_new_data_set_port(data, p->name);
/* Last chance for modules to adjust 'data' before it is frozen. */
309     if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
311         pa_namereg_unregister(core, name);
/* --- From here on, copy the fixated data into the sink object. --- */
315     s->parent.parent.free = sink_free;
316     s->parent.process_msg = pa_sink_process_msg;
319     s->state = PA_SINK_INIT;
322     s->suspend_cause = data->suspend_cause;
323     s->name = pa_xstrdup(name);
324     s->proplist = pa_proplist_copy(data->proplist);
325     s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
326     s->module = data->module;
327     s->card = data->card;
329     s->priority = pa_device_init_priority(s->proplist);
331     s->sample_spec = data->sample_spec;
332     s->channel_map = data->channel_map;
333     s->default_sample_rate = s->sample_spec.rate;
335     if (data->alternate_sample_rate_is_set)
336         s->alternate_sample_rate = data->alternate_sample_rate;
338         s->alternate_sample_rate = s->core->alternate_sample_rate;
340     s->avoid_resampling = data->avoid_resampling;
342     s->origin_avoid_resampling = data->avoid_resampling;
343     s->selected_sample_format = s->sample_spec.format;
344     s->selected_sample_rate = s->sample_spec.rate;
347     s->inputs = pa_idxset_new(NULL, NULL);
349     s->input_to_master = NULL;
351     s->reference_volume = s->real_volume = data->volume;
352     pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
353     s->base_volume = PA_VOLUME_NORM;
354     s->n_volume_steps = PA_VOLUME_NORM+1;
355     s->muted = data->muted;
356     s->refresh_volume = s->refresh_muted = false;
363     /* As a minor optimization we just steal the list instead of
365     s->ports = data->ports;
368     s->active_port = NULL;
369     s->save_port = false;
371     if (data->active_port)
372         if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
373             s->save_port = data->save_port;
375     /* Hopefully the active port has already been assigned in the previous call
376        to pa_device_port_find_best, but better safe than sorry */
378         s->active_port = pa_device_port_find_best(s->ports);
381         s->port_latency_offset = s->active_port->latency_offset;
383         s->port_latency_offset = 0;
385     s->save_volume = data->save_volume;
386     s->save_muted = data->save_muted;
387 #ifdef TIZEN_PCM_DUMP
388     s->pcm_dump_fp = NULL;
392     pa_silence_memchunk_get(
393             &core->silence_cache,
/* Initialize the IO-thread shadow state; mirrors of the main-thread fields
 * plus latency bookkeeping and the deferred-volume change list. */
399     s->thread_info.rtpoll = NULL;
400     s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
401                                                 (pa_free_cb_t) pa_sink_input_unref);
402     s->thread_info.soft_volume = s->soft_volume;
403     s->thread_info.soft_muted = s->muted;
404     s->thread_info.state = s->state;
405     s->thread_info.rewind_nbytes = 0;
406     s->thread_info.rewind_requested = false;
407     s->thread_info.max_rewind = 0;
408     s->thread_info.max_request = 0;
409     s->thread_info.requested_latency_valid = false;
410     s->thread_info.requested_latency = 0;
411     s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
412     s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
413     s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
415     PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
416     s->thread_info.volume_changes_tail = NULL;
417     pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
418     s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
419     s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
420     s->thread_info.port_latency_offset = s->port_latency_offset;
422     /* FIXME: This should probably be moved to pa_sink_put() */
423     pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
426         pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
428     pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
429     pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
432                 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
433                 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Create the paired monitor source that records what this sink plays. */
437     pa_source_new_data_init(&source_data);
438     pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
439     pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
440     pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
441     source_data.name = pa_sprintf_malloc("%s.monitor", name);
442     source_data.driver = data->driver;
443     source_data.module = data->module;
444     source_data.card = data->card;
445     source_data.avoid_resampling = data->avoid_resampling;
447     dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
448     pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
449     pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
/* The monitor source inherits only the latency-related flags. */
451     s->monitor_source = pa_source_new(core, &source_data,
452                                       ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
453                                       ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
455     pa_source_new_data_done(&source_data);
457     if (!s->monitor_source) {
463     s->monitor_source->monitor_of = s;
465     pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
466     pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
467     pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
472 /* Called from main context */
/* Transition the sink to 'state' with the given 'suspend_cause':
 *   - short-circuits if neither state nor cause changes,
 *   - calls the implementor's set_state_in_main_thread() callback, then
 *     sends PA_SINK_MESSAGE_SET_STATE to the IO thread (both may fail only
 *     when resuming; on failure the sink is left SUSPENDED with cause 0),
 *   - fires STATE_CHANGED hooks/subscriptions (unless entering UNLINKED),
 *   - notifies sink inputs (killing KILL_ON_SUSPEND inputs) and syncs the
 *     monitor source's suspend state.
 * Returns 0 on success, negative on resume failure. NOTE(review): interior
 * lines (early returns, some closing braces) are elided in this excerpt. */
473 static int sink_set_state(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) {
476     bool suspend_cause_changed;
479     pa_sink_state_t old_state;
480     pa_suspend_cause_t old_suspend_cause;
483     pa_assert_ctl_context();
485     state_changed = state != s->state;
486     suspend_cause_changed = suspend_cause != s->suspend_cause;
488     if (!state_changed && !suspend_cause_changed)
491     suspending = PA_SINK_IS_OPENED(s->state) && state == PA_SINK_SUSPENDED;
492     resuming = s->state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state);
494     /* If we are resuming, suspend_cause must be 0. */
495     pa_assert(!resuming || !suspend_cause);
497     /* Here's something to think about: what to do with the suspend cause if
498      * resuming the sink fails? The old suspend cause will be incorrect, so we
499      * can't use that. On the other hand, if we set no suspend cause (as is the
500      * case currently), then it looks strange to have a sink suspended without
501      * any cause. It might be a good idea to add a new "resume failed" suspend
502      * cause, or it might just add unnecessary complexity, given that the
503      * current approach of not setting any suspend cause works well enough. */
505     if (s->set_state_in_main_thread) {
506         if ((ret = s->set_state_in_main_thread(s, state, suspend_cause)) < 0) {
507             /* set_state_in_main_thread() is allowed to fail only when resuming. */
510             /* If resuming fails, we set the state to SUSPENDED and
511              * suspend_cause to 0. */
512             state = PA_SINK_SUSPENDED;
514             state_changed = false;
515             suspend_cause_changed = suspend_cause != s->suspend_cause;
518             /* We know the state isn't changing. If the suspend cause isn't
519              * changing either, then there's nothing more to do. */
520             if (!suspend_cause_changed)
/* Forward the transition to the IO thread via the async message queue. */
526         struct set_state_data data = { .state = state, .suspend_cause = suspend_cause };
528         if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, &data, 0, NULL)) < 0) {
529             /* SET_STATE is allowed to fail only when resuming. */
/* Roll back the main-thread callback's effect of the failed resume. */
532             if (s->set_state_in_main_thread)
533                 s->set_state_in_main_thread(s, PA_SINK_SUSPENDED, 0);
535             /* If resuming fails, we set the state to SUSPENDED and
536              * suspend_cause to 0. */
537             state = PA_SINK_SUSPENDED;
539             state_changed = false;
540             suspend_cause_changed = suspend_cause != s->suspend_cause;
543             /* We know the state isn't changing. If the suspend cause isn't
544              * changing either, then there's nothing more to do. */
545             if (!suspend_cause_changed)
550 #ifdef TIZEN_PCM_DUMP
551     /* close file for dump pcm */
552     if (s->pcm_dump_fp && (s->core->pcm_dump & PA_PCM_DUMP_SEPARATED) && suspending) {
553         fclose(s->pcm_dump_fp);
554         pa_log_info("%s closed", s->dump_path);
555         pa_xfree(s->dump_path);
556         s->pcm_dump_fp = NULL;
559     old_suspend_cause = s->suspend_cause;
560     if (suspend_cause_changed) {
561         char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
562         char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
564         pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(s->suspend_cause, old_cause_buf),
565                      pa_suspend_cause_to_string(suspend_cause, new_cause_buf));
566         s->suspend_cause = suspend_cause;
569     old_state = s->state;
571     pa_log_debug("%s: state: %s -> %s", s->name, pa_sink_state_to_string(s->state), pa_sink_state_to_string(state));
574     /* If we enter UNLINKED state, then we don't send change notifications.
575      * pa_sink_unlink() will send unlink notifications instead. */
576     if (state != PA_SINK_UNLINKED) {
577         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
578         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
582     if (suspending || resuming || suspend_cause_changed) {
586         /* We're suspending or resuming, tell everyone about it */
588         PA_IDXSET_FOREACH(i, s->inputs, idx)
589             if (s->state == PA_SINK_SUSPENDED &&
590                 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
591                 pa_sink_input_kill(i);
593                 i->suspend(i, old_state, old_suspend_cause);
596     if ((suspending || resuming || suspend_cause_changed) && s->monitor_source && state != PA_SINK_UNLINKED)
597         pa_source_sync_suspend(s->monitor_source);
602 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Install (or clear) the implementor's set_volume callback and keep the
 * PA_SINK_HW_VOLUME_CTRL flag in sync with it. Decibel-volume support is
 * re-evaluated for pure software volume (see pa_sink_put()). If the flags
 * changed after init, subscribers are notified. */
608 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
609     pa_sink_flags_t flags;
/* write_volume (deferred volume) requires a set_volume callback. */
612     pa_assert(!s->write_volume || cb);
616     /* Save the current flags so we can tell if they've changed */
620     /* The sink implementor is responsible for setting decibel volume support */
621         s->flags |= PA_SINK_HW_VOLUME_CTRL;
623         s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
624         /* See note below in pa_sink_put() about volume sharing and decibel volumes */
625         pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
628     /* If the flags have changed after init, let any clients know via a change event */
629     if (s->state != PA_SINK_INIT && flags != s->flags)
630         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install (or clear) the implementor's write_volume callback and keep the
 * PA_SINK_DEFERRED_VOLUME flag in sync. A write_volume callback is only
 * valid together with set_volume. Notifies subscribers on flag changes
 * after init. */
633 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
634     pa_sink_flags_t flags;
637     pa_assert(!cb || s->set_volume);
639     s->write_volume = cb;
641     /* Save the current flags so we can tell if they've changed */
645         s->flags |= PA_SINK_DEFERRED_VOLUME;
647         s->flags &= ~PA_SINK_DEFERRED_VOLUME;
649     /* If the flags have changed after init, let any clients know via a change event */
650     if (s->state != PA_SINK_INIT && flags != s->flags)
651         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
654 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
/* Install (or clear) the implementor's set_mute callback and keep the
 * PA_SINK_HW_MUTE_CTRL flag in sync. Notifies subscribers on flag changes
 * after init. */
660 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
661     pa_sink_flags_t flags;
667     /* Save the current flags so we can tell if they've changed */
671         s->flags |= PA_SINK_HW_MUTE_CTRL;
673         s->flags &= ~PA_SINK_HW_MUTE_CTRL;
675     /* If the flags have changed after init, let any clients know via a change event */
676     if (s->state != PA_SINK_INIT && flags != s->flags)
677         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Set or clear PA_SINK_FLAT_VOLUME on the sink. The request is ANDed with
 * the global core->flat_volumes preference, so flat volume can never be
 * enabled against the user's daemon-wide setting. Notifies subscribers on
 * flag changes after init. */
680 static void enable_flat_volume(pa_sink *s, bool enable) {
681     pa_sink_flags_t flags;
685     /* Always follow the overall user preference here */
686     enable = enable && s->core->flat_volumes;
688     /* Save the current flags so we can tell if they've changed */
692         s->flags |= PA_SINK_FLAT_VOLUME;
694         s->flags &= ~PA_SINK_FLAT_VOLUME;
696     /* If the flags have changed after init, let any clients know via a change event */
697     if (s->state != PA_SINK_INIT && flags != s->flags)
698         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Set or clear PA_SINK_DECIBEL_VOLUME. Flat volume is coupled to decibel
 * support: enabling dB volume also requests flat volume, disabling it
 * disables flat volume. Notifies subscribers on flag changes after init. */
701 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
702     pa_sink_flags_t flags;
706     /* Save the current flags so we can tell if they've changed */
710         s->flags |= PA_SINK_DECIBEL_VOLUME;
711         enable_flat_volume(s, true);
713         s->flags &= ~PA_SINK_DECIBEL_VOLUME;
714         enable_flat_volume(s, false);
717     /* If the flags have changed after init, let any clients know via a change event */
718     if (s->state != PA_SINK_INIT && flags != s->flags)
719         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
722 /* Called from main context */
/* Finish sink initialization and make the sink live: verifies that the
 * implementor set up flags/callbacks consistently, finalizes the volume
 * configuration (software dB volume, flat volume, volume sharing with a
 * master sink), moves the sink from INIT to SUSPENDED or IDLE, puts the
 * monitor source, and announces the sink via subscription event, SINK_PUT
 * hook and default-sink re-evaluation. */
723 void pa_sink_put(pa_sink* s) {
724     pa_sink_assert_ref(s);
725     pa_assert_ctl_context();
727     pa_assert(s->state == PA_SINK_INIT);
728     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
730     /* The following fields must be initialized properly when calling _put() */
731     pa_assert(s->asyncmsgq);
732     pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
734     /* Generally, flags should be initialized via pa_sink_new(). As a
735      * special exception we allow some volume related flags to be set
736      * between _new() and _put() by the callback setter functions above.
738      * Thus we implement a couple safeguards here which ensure the above
739      * setters were used (or at least the implementor made manual changes
740      * in a compatible way).
742      * Note: All of these flags set here can change over the life time
744     pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
745     pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
746     pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
748     /* XXX: Currently decibel volume is disabled for all sinks that use volume
749      * sharing. When the master sink supports decibel volume, it would be good
750      * to have the flag also in the filter sink, but currently we don't do that
751      * so that the flags of the filter sink never change when it's moved from
752      * a master sink to another. One solution for this problem would be to
753      * remove user-visible volume altogether from filter sinks when volume
754      * sharing is used, but the current approach was easier to implement... */
755     /* We always support decibel volumes in software, otherwise we leave it to
756      * the sink implementor to set this flag as needed.
758      * Note: This flag can also change over the life time of the sink. */
759     if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
760         pa_sink_enable_decibel_volume(s, true);
761         s->soft_volume = s->reference_volume;
764     /* If the sink implementor support DB volumes by itself, we should always
765      * try and enable flat volumes too */
766     if ((s->flags & PA_SINK_DECIBEL_VOLUME))
767         enable_flat_volume(s, true);
769     if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
770         pa_sink *root_sink = pa_sink_get_master(s);
772         pa_assert(root_sink);
/* A volume-sharing filter sink mirrors its master's volumes, remapped to
 * this sink's channel map. */
774         s->reference_volume = root_sink->reference_volume;
775         pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
777         s->real_volume = root_sink->real_volume;
778         pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
780         /* We assume that if the sink implementor changed the default
781          * volume he did so in real_volume, because that is the usual
782          * place where he is supposed to place his changes. */
783         s->reference_volume = s->real_volume;
785     s->thread_info.soft_volume = s->soft_volume;
786     s->thread_info.soft_muted = s->muted;
787     pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
/* Sanity-check consistency between flags, volume fields, latency setup and
 * the monitor source before going live. */
789     pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
790               || (s->base_volume == PA_VOLUME_NORM
791                   && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
792     pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
793     pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
794     pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
795     pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
797     pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
798     pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
799     pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
/* Enter SUSPENDED if a suspend cause is already set, otherwise IDLE. */
801     if (s->suspend_cause)
802         pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED, s->suspend_cause) == 0);
804         pa_assert_se(sink_set_state(s, PA_SINK_IDLE, 0) == 0);
806     pa_source_put(s->monitor_source);
808     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
809     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
811     /* This function must be called after the PA_CORE_HOOK_SINK_PUT hook,
812      * because module-switch-on-connect needs to know the old default sink */
813     pa_core_update_default_sink(s->core);
816 /* Called from main context */
/* Disconnect the sink from the core: fires the UNLINK hook, unregisters
 * the name, removes the sink from the core/card idxsets, kills all
 * remaining inputs, moves to the UNLINKED state (keeping the existing
 * suspend cause), unlinks the monitor source and finally posts the REMOVE
 * subscription event and UNLINK_POST hook. Idempotent via
 * unlink_requested. Note this undoes registrations made in pa_sink_new(),
 * not just pa_sink_put(). */
817 void pa_sink_unlink(pa_sink* s) {
819     pa_sink_input *i, PA_UNUSED *j = NULL;
821     pa_sink_assert_ref(s);
822     pa_assert_ctl_context();
824     /* Please note that pa_sink_unlink() does more than simply
825      * reversing pa_sink_put(). It also undoes the registrations
826      * already done in pa_sink_new()! */
828     if (s->unlink_requested)
831     s->unlink_requested = true;
833     linked = PA_SINK_IS_LINKED(s->state);
836         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
838     if (s->state != PA_SINK_UNLINKED)
839         pa_namereg_unregister(s->core, s->name);
840     pa_idxset_remove_by_data(s->core->sinks, s, NULL);
842     pa_core_update_default_sink(s->core);
845         pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill every input still attached; each kill removes it from s->inputs. */
847     while ((i = pa_idxset_first(s->inputs, NULL))) {
849         pa_sink_input_kill(i);
854         /* It's important to keep the suspend cause unchanged when unlinking,
855          * because if we remove the SESSION suspend cause here, the alsa sink
856          * will sync its volume with the hardware while another user is
857          * active, messing up the volume for that other user. */
858         sink_set_state(s, PA_SINK_UNLINKED, s->suspend_cause);
860         s->state = PA_SINK_UNLINKED;
864     if (s->monitor_source)
865         pa_source_unlink(s->monitor_source);
868         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
869         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
873 /* Called from main context */
/* Object destructor, invoked via parent.parent.free when the refcount hits
 * zero. Requires the sink to be unlinked already. Releases the monitor
 * source reference, input containers, silence memblock, proplist, port
 * hashmap and (Tizen) any open PCM dump file. NOTE(review): a few frees
 * (e.g. of s->name/s->driver) appear to be on elided lines. */
874 static void sink_free(pa_object *o) {
875     pa_sink *s = PA_SINK(o);
878     pa_assert_ctl_context();
879     pa_assert(pa_sink_refcnt(s) == 0);
880     pa_assert(!PA_SINK_IS_LINKED(s->state));
882     pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
884     pa_sink_volume_change_flush(s);
886     if (s->monitor_source) {
887         pa_source_unref(s->monitor_source);
888         s->monitor_source = NULL;
891     pa_idxset_free(s->inputs, NULL);
892     pa_hashmap_free(s->thread_info.inputs);
894     if (s->silence.memblock)
895         pa_memblock_unref(s->silence.memblock);
901     pa_proplist_free(s->proplist);
904         pa_hashmap_free(s->ports);
906 #ifdef TIZEN_PCM_DUMP
907     /* close file for dump pcm */
908     if (s->pcm_dump_fp) {
909         fclose(s->pcm_dump_fp);
910         pa_log_info("%s closed", s->dump_path);
911         pa_xfree(s->dump_path);
912         s->pcm_dump_fp = NULL;
918 /* Called from main context, and not while the IO thread is active, please */
/* Attach the async message queue used for main<->IO thread communication;
 * propagated to the monitor source as well. */
919 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
920     pa_sink_assert_ref(s);
921     pa_assert_ctl_context();
925     if (s->monitor_source)
926         pa_source_set_asyncmsgq(s->monitor_source, q);
929 /* Called from main context, and not while the IO thread is active, please */
/* Change a restricted subset of sink flags (only LATENCY and
 * DYNAMIC_LATENCY may be touched): applies 'value' under 'mask', logs the
 * transitions, notifies subscribers and the FLAGS_CHANGED hook, mirrors
 * the change onto the monitor source, and recurses into filter sinks
 * stacked on top of this one (inputs with an origin_sink). */
930 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
931     pa_sink_flags_t old_flags;
932     pa_sink_input *input;
935     pa_sink_assert_ref(s);
936     pa_assert_ctl_context();
938     /* For now, allow only a minimal set of flags to be changed. */
939     pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
941     old_flags = s->flags;
942     s->flags = (s->flags & ~mask) | (value & mask);
944     if (s->flags == old_flags)
947     if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
948         pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
950     if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
951         pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
952                      s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
954     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
955     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
/* Translate the sink flag bits to the corresponding source flag bits. */
957     if (s->monitor_source)
958         pa_source_update_flags(s->monitor_source,
959                                ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
960                                ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
961                                ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
962                                ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
964     PA_IDXSET_FOREACH(input, s->inputs, idx) {
965         if (input->origin_sink)
966             pa_sink_update_flags(input->origin_sink, mask, value);
970 /* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object the IO thread runs on; propagated to the
 * monitor source as well. */
971 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
972     pa_sink_assert_ref(s);
973     pa_sink_assert_io_context(s);
975     s->thread_info.rtpoll = p;
977     if (s->monitor_source)
978         pa_source_set_rtpoll(s->monitor_source, p);
981 /* Called from main context */
/* Re-evaluate RUNNING vs IDLE based on whether any inputs use the sink.
 * A suspended sink is left alone. Returns the sink_set_state() result. */
982 int pa_sink_update_status(pa_sink*s) {
983     pa_sink_assert_ref(s);
984     pa_assert_ctl_context();
985     pa_assert(PA_SINK_IS_LINKED(s->state));
987     if (s->state == PA_SINK_SUSPENDED)
990     return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
993 /* Called from main context */
/* Add or remove 'cause' in the sink's suspend-cause bitmask. If causes
 * remain after the update, (re)enter SUSPENDED with the merged mask;
 * otherwise resume to RUNNING or IDLE depending on usage. 'cause' must be
 * non-zero. Returns the sink_set_state() result. */
994 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
995     pa_suspend_cause_t merged_cause;
997     pa_sink_assert_ref(s);
998     pa_assert_ctl_context();
999     pa_assert(PA_SINK_IS_LINKED(s->state));
1000     pa_assert(cause != 0);
1003         merged_cause = s->suspend_cause | cause;
1005         merged_cause = s->suspend_cause & ~cause;
1008         return sink_set_state(s, PA_SINK_SUSPENDED, merged_cause);
1010         return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
1013 /* Called from main context */
/* Begin moving all inputs away from this sink: each input for which
 * pa_sink_input_start_move() succeeds is pushed (with a reference held)
 * onto the returned queue, to be completed later with
 * pa_sink_move_all_finish() or aborted with pa_sink_move_all_fail().
 * Iteration caches the next element since start_move detaches the input. */
1014 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
1015     pa_sink_input *i, *n;
1018     pa_sink_assert_ref(s);
1019     pa_assert_ctl_context();
1020     pa_assert(PA_SINK_IS_LINKED(s->state));
1025     for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
1026         n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
1028         pa_sink_input_ref(i);
1030         if (pa_sink_input_start_move(i) >= 0)
1031             pa_queue_push(q, i);
/* NOTE(review): an else branch unreffing on start_move failure is likely
 * on an elided line between these two — confirm against the full source. */
1033             pa_sink_input_unref(i);
1039 /* Called from main context */
/* Complete a move started with pa_sink_move_all_start(): attach every
 * still-linked queued input to sink 's' (falling back to fail_move on
 * error), drop the reference taken during start, and free the queue. */
1040 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
1043     pa_sink_assert_ref(s);
1044     pa_assert_ctl_context();
1045     pa_assert(PA_SINK_IS_LINKED(s->state));
1048     while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1049         if (PA_SINK_INPUT_IS_LINKED(i->state)) {
1050             if (pa_sink_input_finish_move(i, s, save) < 0)
1051                 pa_sink_input_fail_move(i);
1054         pa_sink_input_unref(i);
1057     pa_queue_free(q, NULL);
1060 /* Called from main context */
/* Abort a move started with pa_sink_move_all_start(): fail_move every
 * queued input, drop its reference, and free the queue. */
1061 void pa_sink_move_all_fail(pa_queue *q) {
1064     pa_assert_ctl_context();
1067     while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1068         pa_sink_input_fail_move(i);
1069         pa_sink_input_unref(i);
1072     pa_queue_free(q, NULL);
1075 /* Called from IO thread context */
/* Scan all inputs for underruns given that 'left_to_play' bytes remain in
 * the playback buffer. For filter sinks the check recurses into the origin
 * sink (converting byte counts between the two sample specs). Inputs whose
 * underrun duration covers the whole remaining buffer are processed via
 * pa_sink_input_process_underrun(). Returns left_to_play minus the longest
 * underrun found, i.e. the byte count safe to sleep on. */
1076 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
1081     pa_sink_assert_ref(s);
1082     pa_sink_assert_io_context(s);
1084     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1085         size_t uf = i->thread_info.underrun_for_sink;
1087         /* Propagate down the filter tree */
1088         if (i->origin_sink) {
1089             size_t filter_result, left_to_play_origin;
1091             /* The recursive call works in the origin sink domain ... */
1092             left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
1094             /* .. and returns the time to sleep before waking up. We need the
1095              * underrun duration for comparisons, so we undo the subtraction on
1096              * the return value... */
1097             filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
1099             /* ... and convert it back to the master sink domain */
1100             filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
1102             /* Remember the longest underrun so far */
1103             if (filter_result > result)
1104                 result = filter_result;
1108             /* No underrun here, move on */
1110         } else if (uf >= left_to_play) {
1111             /* The sink has possibly consumed all the data the sink input provided */
1112             pa_sink_input_process_underrun(i);
1113         } else if (uf > result) {
1114             /* Remember the longest underrun so far */
1120         pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
1121                      (long) result, (long) left_to_play - result);
1122     return left_to_play - result;
1125 /* Called from IO thread context */
/* Executes a previously requested rewind of nbytes: notifies deferred
 * volume changes, every input and the monitor source, and rewinds the
 * Tizen PCM dump file position when that feature is compiled in. */
1126 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1130 pa_sink_assert_ref(s);
1131 pa_sink_assert_io_context(s);
1132 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1134 /* If nobody requested this and this is actually no real rewind
1135 * then we can short cut this. Please note that this means that
1136 * not all rewind requests triggered upstream will always be
1137 * translated in actual requests! */
1138 if (!s->thread_info.rewind_requested && nbytes <= 0)
/* (The early 'return;' and the nbytes clamp are elided in this view.) */
1141 s->thread_info.rewind_nbytes = 0;
1142 s->thread_info.rewind_requested = false;
1145 pa_log_debug("Processing rewind...");
/* Deferred (hw) volume changes must be shifted back by the rewound bytes. */
1146 if (s->flags & PA_SINK_DEFERRED_VOLUME)
1147 pa_sink_volume_change_rewind(s, nbytes);
1148 #ifdef TIZEN_PCM_DUMP
/* Move the dump file position back so the dump matches what is played. */
1151 fseeko(s->pcm_dump_fp, (off_t)nbytes * (-1), SEEK_CUR);
1155 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1156 pa_sink_input_assert_ref(i);
1157 pa_sink_input_process_rewind(i, nbytes);
/* The monitor source must rewind in lock-step with the sink. */
1161 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1162 pa_source_process_rewind(s->monitor_source, nbytes);
1166 /* Called from IO thread context */
/* Peeks up to maxinfo chunks (one per input) into info[] for mixing.
 * *length is shrunk to the shortest chunk seen so all entries cover the
 * same span. Silent chunks are skipped. Returns the number of entries
 * filled (the counting/return lines are elided in this view). Each
 * filled entry holds a reference to its input in info->userdata. */
1167 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1171 size_t mixlength = *length;
1173 pa_sink_assert_ref(s);
1174 pa_sink_assert_io_context(s);
1177 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1178 pa_sink_input_assert_ref(i);
1180 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
/* Track the shortest chunk — the mix can only cover that much. */
1182 if (mixlength == 0 || info->chunk.length < mixlength)
1183 mixlength = info->chunk.length;
/* Silence contributes nothing to the mix; drop it right away. */
1185 if (pa_memblock_is_silence(info->chunk.memblock)) {
1186 pa_memblock_unref(info->chunk.memblock);
/* The entry keeps a reference on the input until inputs_drop(). */
1190 info->userdata = pa_sink_input_ref(i);
1192 pa_assert(info->chunk.memblock);
1193 pa_assert(info->chunk.length > 0);
1201 *length = mixlength;
1206 /* Called from IO thread context */
/* After a mix: drops result->length bytes from every input, feeds the
 * per-input data to any direct outputs of the monitor source, releases
 * the chunk/input references held in info[], and finally posts the mixed
 * result to the monitor source. */
1207 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1211 unsigned n_unreffed = 0;
1213 pa_sink_assert_ref(s);
1214 pa_sink_assert_io_context(s);
1216 pa_assert(result->memblock);
1217 pa_assert(result->length > 0);
1219 /* We optimize for the case where the order of the inputs has not changed */
1221 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1223 pa_mix_info* m = NULL;
1225 pa_sink_input_assert_ref(i);
1227 /* Let's try to find the matching entry info the pa_mix_info array */
1228 for (j = 0; j < n; j ++) {
1230 if (info[p].userdata == i) {
1240 /* Drop read data */
1241 pa_sink_input_drop(i, result->length);
1243 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
/* Direct outputs get this input's own (volume-adjusted) data instead
 * of the full mix. */
1245 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1246 void *ostate = NULL;
1247 pa_source_output *o;
/* If we have the peeked chunk for this input, use it ... */
1250 if (m && m->chunk.memblock) {
1252 pa_memblock_ref(c.memblock);
1253 pa_assert(result->length <= c.length);
1254 c.length = result->length;
/* Apply the input's mix volume before handing the data out. */
1256 pa_memchunk_make_writable(&c, 0);
1257 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
/* ... otherwise fall back to the alternative chunk set up in the
 * elided 'else' branch of this view. */
1260 pa_memblock_ref(c.memblock);
1261 pa_assert(result->length <= c.length);
1262 c.length = result->length;
1265 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1266 pa_source_output_assert_ref(o);
1267 pa_assert(o->direct_on_input == i);
1268 pa_source_post_direct(s->monitor_source, o, &c);
1271 pa_memblock_unref(c.memblock);
/* Release the references stored in the matched pa_mix_info entry. */
1276 if (m->chunk.memblock) {
1277 pa_memblock_unref(m->chunk.memblock);
1278 pa_memchunk_reset(&m->chunk);
1281 pa_sink_input_unref(m->userdata);
1288 /* Now drop references to entries that are included in the
1289 * pa_mix_info array but don't exist anymore */
1291 if (n_unreffed < n) {
1292 for (; n > 0; info++, n--) {
1294 pa_sink_input_unref(info->userdata);
1295 if (info->chunk.memblock)
1296 pa_memblock_unref(info->chunk.memblock);
/* Monitor source gets the mixed result (non-direct outputs). */
1300 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1301 pa_source_post(s->monitor_source, result);
1304 /* Called from IO thread context */
/* Renders at most 'length' bytes of mixed audio into *result, allocating
 * the memblock itself (unlike pa_sink_render_into()). Fast paths: a
 * suspended sink returns silence, zero inputs return the cached silence
 * block, a single input may be returned by reference with soft volume
 * applied. Only the general case calls pa_mix(). */
1305 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1306 pa_mix_info info[MAX_MIX_CHANNELS];
1308 size_t block_size_max;
1310 pa_sink_assert_ref(s);
1311 pa_sink_assert_io_context(s);
1312 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1313 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1316 pa_assert(!s->thread_info.rewind_requested);
1317 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: hand out (a slice of) the sink's silence block. */
1319 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1320 result->memblock = pa_memblock_ref(s->silence.memblock);
1321 result->index = s->silence.index;
1322 result->length = PA_MIN(s->silence.length, length);
/* length == 0 means "pick a default" (elided guard in this view). */
1329 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
/* Never render more than one mempool block can hold. */
1331 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1332 if (length > block_size_max)
1333 length = pa_frame_align(block_size_max, &s->sample_spec);
1335 pa_assert(length > 0);
1337 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: no inputs, return silence by reference. */
1341 *result = s->silence;
1342 pa_memblock_ref(result->memblock);
1344 if (result->length > length)
1345 result->length = length;
1347 } else if (n == 1) {
/* Single input: reuse its chunk directly and just apply volume. */
1350 *result = info[0].chunk;
1351 pa_memblock_ref(result->memblock);
1353 if (result->length > length)
1354 result->length = length;
1356 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
/* Muted: swap the chunk for cached silence instead of scaling. */
1358 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1359 pa_memblock_unref(result->memblock);
1360 pa_silence_memchunk_get(&s->core->silence_cache,
1365 } else if (!pa_cvolume_is_norm(&volume)) {
1366 pa_memchunk_make_writable(result, 0);
1367 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* General case (elided 'else' in this view): mix into a fresh block. */
1371 result->memblock = pa_memblock_new(s->core->mempool, length);
1373 ptr = pa_memblock_acquire(result->memblock);
1374 result->length = pa_mix(info, n,
1377 &s->thread_info.soft_volume,
1378 s->thread_info.soft_muted);
1379 pa_memblock_release(result->memblock);
/* Consume the rendered data from all inputs and feed the monitor. */
1384 inputs_drop(s, info, n, result);
1386 #ifdef TIZEN_PCM_DUMP
1387 pa_sink_write_pcm_dump(s, result);
1392 /* Called from IO thread context */
/* Like pa_sink_render(), but mixes into a caller-provided memchunk
 * (target->memblock already allocated). May shorten target->length if
 * less data is available than requested. */
1393 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1394 pa_mix_info info[MAX_MIX_CHANNELS];
1396 size_t length, block_size_max;
1398 pa_sink_assert_ref(s);
1399 pa_sink_assert_io_context(s);
1400 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1402 pa_assert(target->memblock);
1403 pa_assert(target->length > 0);
1404 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1406 pa_assert(!s->thread_info.rewind_requested);
1407 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: just write silence into the caller's buffer. */
1409 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1410 pa_silence_memchunk(target, &s->sample_spec);
1416 length = target->length;
/* Cap at the mempool's maximum block size, keeping frame alignment. */
1417 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1418 if (length > block_size_max)
1419 length = pa_frame_align(block_size_max, &s->sample_spec);
1421 pa_assert(length > 0);
1423 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: no inputs — silence (the 'if (n == 0)' line is elided). */
1426 if (target->length > length)
1427 target->length = length;
1429 pa_silence_memchunk(target, &s->sample_spec);
1430 } else if (n == 1) {
/* Single input: copy its (volume-adjusted) chunk into target. */
1433 if (target->length > length)
1434 target->length = length;
1436 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1438 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1439 pa_silence_memchunk(target, &s->sample_spec);
1443 vchunk = info[0].chunk;
1444 pa_memblock_ref(vchunk.memblock);
1446 if (vchunk.length > length)
1447 vchunk.length = length;
/* Scale only when the combined volume differs from 0 dB. */
1449 if (!pa_cvolume_is_norm(&volume)) {
1450 pa_memchunk_make_writable(&vchunk, 0);
1451 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1454 pa_memchunk_memcpy(target, &vchunk);
1455 pa_memblock_unref(vchunk.memblock);
/* General case (elided 'else' in this view): mix directly into target. */
1461 ptr = pa_memblock_acquire(target->memblock);
1463 target->length = pa_mix(info, n,
1464 (uint8_t*) ptr + target->index, length,
1466 &s->thread_info.soft_volume,
1467 s->thread_info.soft_muted);
1469 pa_memblock_release(target->memblock);
/* Consume the rendered data from all inputs and feed the monitor. */
1472 inputs_drop(s, info, n, target);
1474 #ifdef TIZEN_PCM_DUMP
1475 pa_sink_write_pcm_dump(s, target);
1480 /* Called from IO thread context */
/* Like pa_sink_render_into(), but guarantees the whole target chunk is
 * filled: it loops rendering into the remaining tail until target->length
 * bytes have been produced (loop body largely elided in this view). */
1481 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1485 pa_sink_assert_ref(s);
1486 pa_sink_assert_io_context(s);
1487 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1489 pa_assert(target->memblock);
1490 pa_assert(target->length > 0);
1491 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1493 pa_assert(!s->thread_info.rewind_requested);
1494 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: the whole target becomes silence, no loop needed. */
1496 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1497 pa_silence_memchunk(target, &s->sample_spec);
/* Render one (possibly partial) piece into the current window. */
1510 pa_sink_render_into(s, &chunk);
1519 /* Called from IO thread context */
/* Renders exactly 'length' bytes into *result: first a normal render,
 * then — if that came up short — the remainder is rendered into the tail
 * of the same (now writable) block via pa_sink_render_into_full(). */
1520 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1521 pa_sink_assert_ref(s);
1522 pa_sink_assert_io_context(s);
1523 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1524 pa_assert(length > 0);
1525 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1528 pa_assert(!s->thread_info.rewind_requested);
1529 pa_assert(s->thread_info.rewind_nbytes == 0);
1533 pa_sink_render(s, length, result);
1535 if (result->length < length) {
/* Grow/copy the block so we can append into it in place. */
1538 pa_memchunk_make_writable(result, length);
1540 chunk.memblock = result->memblock;
1541 chunk.index = result->index + result->length;
1542 chunk.length = length - result->length;
1544 pa_sink_render_into_full(s, &chunk);
1546 result->length = length;
1552 /* Called from main thread */
/* Tries to switch the sink to a sample spec close to *spec (exact in
 * passthrough / avoid-resampling modes, otherwise the default or
 * alternate rate with the least resampling effort). Bails out when the
 * sink has no reconfigure() callback, is RUNNING, or its monitor source
 * is RUNNING. Suspends the sink around the actual reconfiguration when
 * inputs are attached, and updates corked inputs' resamplers after. */
1553 void pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
1554 pa_sample_spec desired_spec;
1555 uint32_t default_rate = s->default_sample_rate;
1556 uint32_t alternate_rate = s->alternate_sample_rate;
1559 bool default_rate_is_usable = false;
1560 bool alternate_rate_is_usable = false;
1561 bool avoid_resampling = s->avoid_resampling;
/* Nothing to do if the requested spec is already active. */
1563 if (pa_sample_spec_equal(spec, &s->sample_spec))
1566 if (!s->reconfigure)
1570 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1571 pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
/* A running sink cannot be reconfigured without audible disruption. */
1576 if (PA_SINK_IS_RUNNING(s->state)) {
1577 pa_log_info("Cannot update sample spec, SINK_IS_RUNNING, will keep using %s and %u Hz",
1578 pa_sample_format_to_string(s->sample_spec.format), s->sample_spec.rate);
1582 if (s->monitor_source) {
1583 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1584 pa_log_info("Cannot update sample spec, monitor source is RUNNING");
1589 if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
1592 desired_spec = s->sample_spec;
/* (The 'if (passthrough)' guard for this branch is elided in this view.) */
1595 if (!avoid_resampling) {
1596 default_rate = alternate_rate = s->selected_sample_rate;
1597 desired_spec.format = s->selected_sample_format;
1601 /* We have to try to use the sink input format and rate */
1602 desired_spec.format = spec->format;
1603 desired_spec.rate = spec->rate;
1605 } else if (avoid_resampling) {
1606 /* We just try to set the sink input's sample rate if it's not too low */
1607 if (spec->rate >= default_rate || spec->rate >= alternate_rate)
1608 desired_spec.rate = spec->rate;
1609 desired_spec.format = spec->format;
1611 } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
1612 /* We can directly try to use this rate */
1613 desired_spec.rate = spec->rate;
1617 if (desired_spec.rate != spec->rate) {
1618 /* See if we can pick a rate that results in less resampling effort */
/* 11025/4000 divisibility picks a rate in the same "family" as the
 * stream rate (44.1k vs 48k families). */
1619 if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
1620 default_rate_is_usable = true;
1621 if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
1622 default_rate_is_usable = true;
1623 if (alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
1624 alternate_rate_is_usable = true;
1625 if (alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
1626 alternate_rate_is_usable = true;
1628 if (alternate_rate_is_usable && !default_rate_is_usable)
1629 desired_spec.rate = alternate_rate;
1631 desired_spec.rate = default_rate;
1634 if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_sink_is_passthrough(s))
1637 pa_log_info("desired spec is same as sink->sample_spec");
/* Suspend while attached inputs exist so the switch is glitch-free. */
1644 if (!passthrough && pa_sink_used_by(s) > 0)
1647 pa_log_debug("Suspending sink %s due to changing format, desired format = %s rate = %u",
1648 s->name, pa_sample_format_to_string(desired_spec.format), desired_spec.rate);
1649 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1651 s->reconfigure(s, &desired_spec, passthrough);
1653 /* update monitor source as well */
1654 if (s->monitor_source && !passthrough)
1655 pa_source_reconfigure(s->monitor_source, &s->sample_spec, false);
1656 pa_log_info("Reconfigured successfully");
/* Corked inputs won't notice the rate change until they uncork, so fix
 * their resamplers now. */
1658 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1659 if (i->state == PA_SINK_INPUT_CORKED)
1660 pa_sink_input_update_resampler(i);
1663 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1666 /* Called from main thread */
/* Returns the sink latency in usec (sound-card time domain), including
 * the active port's latency offset. Returns early (elided lines) when
 * suspended or when the sink does not report latency. */
1667 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1670 pa_sink_assert_ref(s);
1671 pa_assert_ctl_context();
1672 pa_assert(PA_SINK_IS_LINKED(s->state));
1674 /* The returned value is supposed to be in the time domain of the sound card! */
1676 if (s->state == PA_SINK_SUSPENDED)
1679 if (!(s->flags & PA_SINK_LATENCY))
/* Synchronous round-trip to the IO thread for the current latency. */
1682 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1684 /* the return value is unsigned, so check that the offset can be added to usec without
/* ... underflowing below zero (a negative offset larger than usec is
 * clamped by skipping the addition). */
1686 if (-s->port_latency_offset <= usec)
1687 usec += s->port_latency_offset;
1691 return (pa_usec_t)usec;
1694 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): queries the latency via a
 * direct process_msg() call instead of a message round-trip. When
 * allow_negative is false, negative results are clamped (the clamp
 * assignment and return are elided in this view). */
1695 int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
1699 pa_sink_assert_ref(s);
1700 pa_sink_assert_io_context(s);
1701 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1703 /* The returned value is supposed to be in the time domain of the sound card! */
1705 if (s->thread_info.state == PA_SINK_SUSPENDED)
1708 if (!(s->flags & PA_SINK_LATENCY))
1711 o = PA_MSGOBJECT(s);
1713 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1715 o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1717 /* If allow_negative is false, the call should only return positive values, */
1718 usec += s->thread_info.port_latency_offset;
1719 if (!allow_negative && usec < 0)
1725 /* Called from the main thread (and also from the IO thread while the main
1726 * thread is waiting).
1728 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1729 * set. Instead, flat volume mode is detected by checking whether the root sink
1730 * has the flag set. */
1731 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1732 pa_sink_assert_ref(s);
/* Walk up to the root of the filter chain; only its flag is decisive. */
1734 s = pa_sink_get_master(s);
1737 return (s->flags & PA_SINK_FLAT_VOLUME);
1742 /* Called from the main thread (and also from the IO thread while the main
1743 * thread is waiting). */
/* Follows input_to_master links up the filter-sink chain and returns the
 * root (master) sink. The NULL-return for a dangling link and the final
 * 'return s;' are elided in this view. */
1744 pa_sink *pa_sink_get_master(pa_sink *s) {
1745 pa_sink_assert_ref(s);
1747 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1748 if (PA_UNLIKELY(!s->input_to_master))
1751 s = s->input_to_master->sink;
1764 /* Called from main context */
/* A sink is a filter sink iff it sits on top of another sink via an
 * input_to_master sink input. */
1765 bool pa_sink_is_filter(pa_sink *s) {
1759 pa_sink_assert_ref(s);
1761 return (s->input_to_master != NULL);
1764 /* Called from main context */
/* True iff the sink's sole input is a passthrough stream (passthrough is
 * exclusive, so more than one input rules it out). */
1765 bool pa_sink_is_passthrough(pa_sink *s) {
1766 pa_sink_input *alt_i;
1769 pa_sink_assert_ref(s);
1771 /* one and only one PASSTHROUGH input can possibly be connected */
1772 if (pa_idxset_size(s->inputs) == 1) {
1773 alt_i = pa_idxset_first(s->inputs, &idx);
1775 if (pa_sink_input_is_passthrough(alt_i))
1782 /* Called from main context */
/* Puts the PA core objects into passthrough mode: suspends the monitor
 * source, saves the current volume, and forces the volume towards 0 dB
 * (capped at the sink's base volume) so the compressed stream passes
 * through unscaled. */
1783 void pa_sink_enter_passthrough(pa_sink *s) {
1786 /* The sink implementation is reconfigured for passthrough in
1787 * pa_sink_reconfigure(). This function sets the PA core objects to
1788 * passthrough mode. */
1790 /* disable the monitor in passthrough mode */
1791 if (s->monitor_source) {
1792 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1793 pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1796 /* set the volume to NORM */
/* Remember the volume so pa_sink_leave_passthrough() can restore it. */
1797 s->saved_volume = *pa_sink_get_volume(s, true);
1798 s->saved_save_volume = s->save_volume;
1800 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1801 pa_sink_set_volume(s, &volume, true, false);
1803 pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
1806 /* Called from main context */
/* Reverses pa_sink_enter_passthrough(): resumes the monitor source and
 * restores the volume/save flag that were saved on entry. */
1807 void pa_sink_leave_passthrough(pa_sink *s) {
1808 /* Unsuspend monitor */
1809 if (s->monitor_source) {
1810 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1811 pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1814 /* Restore sink volume to what it was before we entered passthrough mode */
1815 pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
/* Clear the saved state so it cannot be restored twice. */
1817 pa_cvolume_init(&s->saved_volume);
1818 s->saved_save_volume = false;
1822 /* Called from main context. */
/* Recomputes i->reference_ratio = i->volume / i->sink->reference_volume
 * per channel (sink volume remapped into the input's channel map),
 * skipping channels where the update would make no difference. */
1823 static void compute_reference_ratio(pa_sink_input *i) {
1825 pa_cvolume remapped;
1829 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1832 * Calculates the reference ratio from the sink's reference
1833 * volume. This basically calculates:
1835 * i->reference_ratio = i->volume / i->sink->reference_volume
1838 remapped = i->sink->reference_volume;
1839 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1841 ratio = i->reference_ratio;
1843 for (c = 0; c < i->sample_spec.channels; c++) {
1845 /* We don't update when the sink volume is 0 anyway */
1846 if (remapped.values[c] <= PA_VOLUME_MUTED)
1849 /* Don't update the reference ratio unless necessary */
/* If ratio * sink_volume already equals the input volume, the stored
 * ratio is still exact — keep it to avoid rounding drift. */
1850 if (pa_sw_volume_multiply(
1852 remapped.values[c]) == i->volume.values[c])
1855 ratio.values[c] = pa_sw_volume_divide(
1856 i->volume.values[c],
1857 remapped.values[c]);
1860 pa_sink_input_set_reference_ratio(i, &ratio);
1863 /* Called from main context. Only called for the root sink in volume sharing
1864 * cases, except for internal recursive calls. */
/* Recomputes the reference ratio of every input of s, recursing into
 * linked filter sinks that share volume with their master. */
1865 static void compute_reference_ratios(pa_sink *s) {
1869 pa_sink_assert_ref(s);
1870 pa_assert_ctl_context();
1871 pa_assert(PA_SINK_IS_LINKED(s->state));
1872 pa_assert(pa_sink_flat_volume_enabled(s));
1874 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1875 compute_reference_ratio(i);
/* Descend into volume-sharing filter sinks so the whole tree agrees. */
1877 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1878 && PA_SINK_IS_LINKED(i->origin_sink->state))
1879 compute_reference_ratios(i->origin_sink);
1883 /* Called from main context. Only called for the root sink in volume sharing
1884 * cases, except for internal recursive calls. */
/* Recomputes, for every input of s:
 *   i->real_ratio  := i->volume / s->real_volume
 *   i->soft_volume := i->real_ratio * i->volume_factor
 * Volume-sharing filter sinks are special-cased (ratio fixed at 0 dB)
 * and recursed into instead. */
1885 static void compute_real_ratios(pa_sink *s) {
1889 pa_sink_assert_ref(s);
1890 pa_assert_ctl_context();
1891 pa_assert(PA_SINK_IS_LINKED(s->state));
1892 pa_assert(pa_sink_flat_volume_enabled(s));
1894 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1896 pa_cvolume remapped;
1898 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1899 /* The origin sink uses volume sharing, so this input's real ratio
1900 * is handled as a special case - the real ratio must be 0 dB, and
1901 * as a result i->soft_volume must equal i->volume_factor. */
1902 pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1903 i->soft_volume = i->volume_factor;
1905 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1906 compute_real_ratios(i->origin_sink);
1912 * This basically calculates:
1914 * i->real_ratio := i->volume / s->real_volume
1915 * i->soft_volume := i->real_ratio * i->volume_factor
/* Bring the sink's real volume into this input's channel map first. */
1918 remapped = s->real_volume;
1919 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1921 i->real_ratio.channels = i->sample_spec.channels;
1922 i->soft_volume.channels = i->sample_spec.channels;
1924 for (c = 0; c < i->sample_spec.channels; c++) {
/* Sink channel muted: the quotient is undefined, so leave the stored
 * ratio and just mute the soft volume. */
1926 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1927 /* We leave i->real_ratio untouched */
1928 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1932 /* Don't lose accuracy unless necessary */
1933 if (pa_sw_volume_multiply(
1934 i->real_ratio.values[c],
1935 remapped.values[c]) != i->volume.values[c])
1937 i->real_ratio.values[c] = pa_sw_volume_divide(
1938 i->volume.values[c],
1939 remapped.values[c]);
1941 i->soft_volume.values[c] = pa_sw_volume_multiply(
1942 i->real_ratio.values[c],
1943 i->volume_factor.values[c]);
1946 /* We don't copy the soft_volume to the thread_info data
1947 * here. That must be done by the caller */
/* Remaps *v from channel map 'from' to 'to' while minimizing the impact
 * on other streams: if 'template' is a possible remapping of v it is
 * used verbatim; with mismatched maps an all-channel (max) volume is set
 * instead. Returns v (return statements are elided in this view). */
1951 static pa_cvolume *cvolume_remap_minimal_impact(
1953 const pa_cvolume *template,
1954 const pa_channel_map *from,
1955 const pa_channel_map *to) {
1960 pa_assert(template);
1963 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1964 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1966 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1967 * mapping from sink input to sink volumes:
1969 * If template is a possible remapping from v it is used instead
1970 * of remapping anew.
1972 * If the channel maps don't match we set an all-channel volume on
1973 * the sink to ensure that changing a volume on one stream has no
1974 * effect that cannot be compensated for in another stream that
1975 * does not have the same channel map as the sink. */
1977 if (pa_channel_map_equal(from, to))
/* Check whether the template, mapped back, reproduces v exactly. */
1981 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
/* Fallback: flatten to a single max volume across all 'to' channels. */
1986 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1990 /* Called from main thread. Only called for the root sink in volume sharing
1991 * cases, except for internal recursive calls. */
/* Merges the maximum of all input volumes (recursively through
 * volume-sharing filter sinks) into *max_volume, expressed in
 * channel_map's domain. */
1992 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1996 pa_sink_assert_ref(s);
1997 pa_assert(max_volume);
1998 pa_assert(channel_map);
1999 pa_assert(pa_sink_flat_volume_enabled(s));
2001 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2002 pa_cvolume remapped;
2004 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2005 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2006 get_maximum_input_volume(i->origin_sink, max_volume, channel_map)
2008 /* Ignore this input. The origin sink uses volume sharing, so this
2009 * input's volume will be set to be equal to the root sink's real
2010 * volume. Obviously this input's current volume must not then
2011 * affect what the root sink's real volume will be. */
2015 remapped = i->volume;
2016 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
2017 pa_cvolume_merge(max_volume, max_volume, &remapped);
2021 /* Called from main thread. Only called for the root sink in volume sharing
2022 * cases, except for internal recursive calls. */
/* True iff the sink (or any volume-sharing filter sink chained below it)
 * has at least one "real" input, i.e. one that is not itself the uplink
 * of an empty volume-sharing filter sink. */
2023 static bool has_inputs(pa_sink *s) {
2027 pa_sink_assert_ref(s);
2029 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2030 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
2037 /* Called from main thread. Only called for the root sink in volume sharing
2038 * cases, except for internal recursive calls. */
/* Sets s->real_volume to *new_volume (remapped from channel_map into the
 * sink's own map) and propagates it down into volume-sharing filter
 * sinks, keeping their inputs' volumes in sync with the root's real
 * volume when flat volume is enabled. */
2039 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
2043 pa_sink_assert_ref(s);
2044 pa_assert(new_volume);
2045 pa_assert(channel_map);
2047 s->real_volume = *new_volume;
2048 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
2050 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2051 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2052 if (pa_sink_flat_volume_enabled(s)) {
2053 pa_cvolume new_input_volume;
2055 /* Follow the root sink's real volume. */
2056 new_input_volume = *new_volume;
2057 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
2058 pa_sink_input_set_volume_direct(i, &new_input_volume);
/* The input volume just changed, so its reference ratio must too. */
2059 compute_reference_ratio(i);
2062 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2063 update_real_volume(i->origin_sink, new_volume, channel_map);
2068 /* Called from main thread. Only called for the root sink in shared volume
/* Determines the maximum volume over all streams and stores it (and
 * propagates it) as s->real_volume; then refreshes the real ratios /
 * soft volumes of all inputs. With no inputs, the reference volume is
 * used unchanged. */
2070 static void compute_real_volume(pa_sink *s) {
2071 pa_sink_assert_ref(s);
2072 pa_assert_ctl_context();
2073 pa_assert(PA_SINK_IS_LINKED(s->state));
2074 pa_assert(pa_sink_flat_volume_enabled(s));
2075 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2077 /* This determines the maximum volume of all streams and sets
2078 * s->real_volume accordingly. */
2080 if (!has_inputs(s)) {
2081 /* In the special case that we have no sink inputs we leave the
2082 * volume unmodified. */
2083 update_real_volume(s, &s->reference_volume, &s->channel_map);
/* Start from mute so the merge below is a pure maximum. */
2087 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
2089 /* First let's determine the new maximum volume of all inputs
2090 * connected to this sink */
2091 get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
2092 update_real_volume(s, &s->real_volume, &s->channel_map);
2094 /* Then, let's update the real ratios/soft volumes of all inputs
2095 * connected to this sink */
2096 compute_real_ratios(s);
2099 /* Called from main thread. Only called for the root sink in shared volume
2100 * cases, except for internal recursive calls. */
/* After a sink-level volume change, recomputes every input's volume as
 * s->reference_volume * i->reference_ratio (remapped into the input's
 * channel map). Volume-sharing filter sinks are recursed into instead of
 * being updated here. */
2101 static void propagate_reference_volume(pa_sink *s) {
2105 pa_sink_assert_ref(s);
2106 pa_assert_ctl_context();
2107 pa_assert(PA_SINK_IS_LINKED(s->state));
2108 pa_assert(pa_sink_flat_volume_enabled(s));
2110 /* This is called whenever the sink volume changes that is not
2111 * caused by a sink input volume change. We need to fix up the
2112 * sink input volumes accordingly */
2114 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2115 pa_cvolume new_volume;
2117 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2118 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2119 propagate_reference_volume(i->origin_sink);
2121 /* Since the origin sink uses volume sharing, this input's volume
2122 * needs to be updated to match the root sink's real volume, but
2123 * that will be done later in update_real_volume(). */
2127 /* This basically calculates:
2129 * i->volume := s->reference_volume * i->reference_ratio */
2131 new_volume = s->reference_volume;
2132 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2133 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2134 pa_sink_input_set_volume_direct(i, &new_volume);
2138 /* Called from main thread. Only called for the root sink in volume sharing
2139 * cases, except for internal recursive calls. The return value indicates
2140 * whether any reference volume actually changed. */
/* Sets s->reference_volume to *v (remapped from channel_map), updates
 * the save flag, and recurses into volume-sharing filter sinks. */
2141 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
2143 bool reference_volume_changed;
2147 pa_sink_assert_ref(s);
2148 pa_assert(PA_SINK_IS_LINKED(s->state));
2150 pa_assert(channel_map);
2151 pa_assert(pa_cvolume_valid(v));
/* 'volume' is a local copy of *v (its initialization is elided). */
2154 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
2156 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
2157 pa_sink_set_reference_volume_direct(s, &volume);
/* Keep an already-set save flag when nothing changed; else honour 'save'. */
2159 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
2161 if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2162 /* If the root sink's volume doesn't change, then there can't be any
2163 * changes in the other sinks in the sink tree either.
2165 * It's probably theoretically possible that even if the root sink's
2166 * volume changes slightly, some filter sink doesn't change its volume
2167 * due to rounding errors. If that happens, we still want to propagate
2168 * the changed root sink volume to the sinks connected to the
2169 * intermediate sink that didn't change its volume. This theoretical
2170 * possibility is the reason why we have that !(s->flags &
2171 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2172 * notice even if we returned here false always if
2173 * reference_volume_changed is false. */
2176 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2177 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2178 && PA_SINK_IS_LINKED(i->origin_sink->state))
2179 update_reference_volume(i->origin_sink, v, channel_map, false);
2185 /* Called from main thread */
/* Sets the sink volume. 'volume' may be NULL, in which case (flat volume
 * only) the sink's reference/real volumes are re-synchronized from the
 * current stream volumes instead. In volume-sharing setups the change is
 * applied to the root sink and propagated from there. Passthrough sinks
 * reject any change except a reset to 0 dB. */
2186 void pa_sink_set_volume(
2188 const pa_cvolume *volume,
2192 pa_cvolume new_reference_volume;
2195 pa_sink_assert_ref(s);
2196 pa_assert_ctl_context();
2197 pa_assert(PA_SINK_IS_LINKED(s->state));
2198 pa_assert(!volume || pa_cvolume_valid(volume));
2199 pa_assert(volume || pa_sink_flat_volume_enabled(s));
2200 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2202 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2203 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2204 if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2205 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
2209 /* In case of volume sharing, the volume is set for the root sink first,
2210 * from which it's then propagated to the sharing sinks. */
2211 root_sink = pa_sink_get_master(s);
2213 if (PA_UNLIKELY(!root_sink))
2216 /* As a special exception we accept mono volumes on all sinks --
2217 * even on those with more complex channel maps */
/* (The 'if (volume)' guard for this branch is elided in this view.) */
2220 if (pa_cvolume_compatible(volume, &s->sample_spec))
2221 new_reference_volume = *volume;
/* Mono volume: scale the current reference volume to the given level. */
2223 new_reference_volume = s->reference_volume;
2224 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2227 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2229 if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2230 if (pa_sink_flat_volume_enabled(root_sink)) {
2231 /* OK, propagate this volume change back to the inputs */
2232 propagate_reference_volume(root_sink);
2234 /* And now recalculate the real volume */
2235 compute_real_volume(root_sink);
/* Non-flat: real volume simply follows the reference volume. */
2237 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2241 /* If volume is NULL we synchronize the sink's real and
2242 * reference volumes with the stream volumes. */
2244 pa_assert(pa_sink_flat_volume_enabled(root_sink));
2246 /* Ok, let's determine the new real volume */
2247 compute_real_volume(root_sink);
2249 /* Let's 'push' the reference volume if necessary */
2250 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2251 /* If the sink and its root don't have the same number of channels, we need to remap */
2252 if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2253 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2254 update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2256 /* Now that the reference volume is updated, we can update the streams'
2257 * reference ratios. */
2258 compute_reference_ratios(root_sink);
2261 if (root_sink->set_volume) {
2262 /* If we have a function set_volume(), then we do not apply a
2263 * soft volume by default. However, set_volume() is free to
2264 * apply one to root_sink->soft_volume */
2266 pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
/* Deferred-volume sinks apply set_volume() from the IO thread instead. */
2267 if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2268 root_sink->set_volume(root_sink);
2271 /* If we have no function set_volume(), then the soft volume
2272 * becomes the real volume */
2273 root_sink->soft_volume = root_sink->real_volume;
2275 /* This tells the sink that soft volume and/or real volume changed */
2277 pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
/* Called from the io thread if sync volume is used, otherwise from the main thread.
 * Only to be called by sink implementor */
void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {

    pa_sink_assert_ref(s);
    /* A filter sink sharing its master's volume must not set its own soft volume. */
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    /* Deferred-volume sinks apply volume from the IO thread.
     * NOTE(review): an 'else' branch appears elided in this listing. */
    if (s->flags & PA_SINK_DEFERRED_VOLUME)
        pa_sink_assert_io_context(s);
    pa_assert_ctl_context();

    /* NOTE(review): an 'if (!volume) ... else' selection appears elided here;
     * as listed, the soft volume is reset and then overwritten from *volume. */
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);

    s->soft_volume = *volume;

    /* For a linked, non-deferred sink hand the new soft volume to the IO
     * thread synchronously...
     * NOTE(review): an 'else' appears elided before the direct assignment. */
    if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);

    /* ...otherwise mirror it into thread_info directly. */
    s->thread_info.soft_volume = s->soft_volume;
/* Called from the main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    /* NOTE(review): local declarations (e.g. 'pa_sink_input *i; uint32_t idx;')
     * and several return/brace lines appear elided in this listing. */
    pa_sink_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
        /* Nothing to do when the hardware volume did not actually change. */
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, true);

    if (pa_sink_flat_volume_enabled(s)) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume new_volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            pa_sink_input_set_reference_ratio(i, &i->real_ratio);

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            new_volume = s->reference_volume;
            pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
            pa_sink_input_set_volume_direct(i, &new_volume);

            /* Recurse into linked filter sinks that share our volume. */
            if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
                    && PA_SINK_IS_LINKED(i->origin_sink->state))
                propagate_real_volume(i->origin_sink, old_real_volume);

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        s->save_volume = true;
/* Called from io thread */
void pa_sink_update_volume_and_mute(pa_sink *s) {
    pa_sink_assert_io_context(s);

    /* Defer the actual refresh to the main thread, where
     * pa_sink_get_volume()/pa_sink_get_mute() may be called safely. */
    pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
/* Called from main thread */
const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Re-read the hardware volume when asked to, or when the sink is
     * flagged as needing a refresh on every query. */
    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_real_volume;

        pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

        old_real_volume = s->real_volume;

        /* NOTE(review): the direct s->get_volume(s) call and the
         * deferred-volume condition guarding the message send appear
         * elided in this listing. */
        if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        update_real_volume(s, &s->real_volume, &s->channel_map);
        propagate_real_volume(s, &old_real_volume);

    /* Callers see the reference volume, not the real/hw volume. */
    return &s->reference_volume;
/* Called from main thread. In volume sharing cases, only the root sink may
 * call this function. (NOTE(review): the remainder of this header comment
 * appears elided in this listing.) */
void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
    pa_cvolume old_real_volume;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    /* The sink implementor may call this if the volume changed to make sure everyone is notified */

    old_real_volume = s->real_volume;
    update_real_volume(s, new_real_volume, &s->channel_map);
    propagate_real_volume(s, &old_real_volume);
/* Called from main thread */
void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
    /* NOTE(review): the 'bool old_muted;' declaration, the early return for
     * an unchanged mute state, the 's->muted = mute;' assignment and the
     * s->set_mute(s) invocation appear elided in this listing. */
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    old_muted = s->muted;

    /* Unchanged mute state: only upgrade the save flag, no notifications. */
    if (mute == old_muted) {
        s->save_muted |= save;

    s->save_muted = save;

    /* Non-deferred sinks apply the mute immediately via the implementor
     * callback; set_mute_in_progress guards against re-entrancy through
     * pa_sink_mute_changed(). */
    if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
        s->set_mute_in_progress = true;
        s->set_mute_in_progress = false;

    /* Not yet linked: nothing to notify. */
    if (!PA_SINK_IS_LINKED(s->state))

    pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
/* Called from main thread */
bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
    /* NOTE(review): the 'bool mute;' declaration, the 'else' pairing of the
     * two query paths and the final 'return s->muted;' appear elided in
     * this listing. */
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Re-query the hardware mute state when requested or needed. */
    if ((s->refresh_muted || force_refresh) && s->get_mute) {

        /* Deferred volume: ask the IO thread for the mute state. */
        if (s->flags & PA_SINK_DEFERRED_VOLUME) {
            if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
                pa_sink_mute_changed(s, mute);

        /* Otherwise query the implementor callback directly. */
        if (s->get_mute(s, &mute) >= 0)
            pa_sink_mute_changed(s, mute);
/* Called from main thread */
void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Ignore notifications we caused ourselves from within
     * pa_sink_set_mute(). NOTE(review): 'return;' lines appear elided
     * after both guards below. */
    if (s->set_mute_in_progress)

    /* pa_sink_set_mute() does this same check, so this may appear redundant,
     * but we must have this here also, because the save parameter of
     * pa_sink_set_mute() would otherwise have unintended side effects (saving
     * the mute state when it shouldn't be saved). */
    if (new_muted == s->muted)

    /* Hardware-triggered changes are assumed user-initiated, hence save=true. */
    pa_sink_set_mute(s, new_muted, true);
/* Called from main thread */
bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Merge/replace properties per 'mode'. */
    pa_proplist_update(s->proplist, mode, p);

    /* Only notify once the sink is visible to clients.
     * NOTE(review): the final 'return true;' appears elided in this listing. */
    if (PA_SINK_IS_LINKED(s->state)) {
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Called from main thread */
/* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
void pa_sink_set_description(pa_sink *s, const char *description) {
    /* NOTE(review): the 'const char *old;'/'char *n;' declarations, several
     * 'return;'/brace lines, the if/else selecting between sets/unset, and
     * the pa_xfree(n) appear elided in this listing. */
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Clearing an already-absent description is a no-op. */
    if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))

    old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    /* Unchanged description: nothing to do. */
    if (old && description && pa_streq(old, description))

    pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);

    pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    /* Keep the monitor source's description in sync with ours. */
    if (s->monitor_source) {

        n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
        pa_source_set_description(s->monitor_source, n);

    if (PA_SINK_IS_LINKED(s->state)) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
/* Called from main thread */
unsigned pa_sink_linked_by(pa_sink *s) {
    /* Number of streams attached to this sink, including streams on the
     * monitor source. NOTE(review): the 'unsigned ret;' declaration and the
     * final 'return ret;' appear elided in this listing. */
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    ret = pa_idxset_size(s->inputs);

    /* We add in the number of streams connected to us here. Please
     * note the asymmetry to pa_sink_used_by()! */

    if (s->monitor_source)
        ret += pa_source_linked_by(s->monitor_source);
/* Called from main thread */
unsigned pa_sink_used_by(pa_sink *s) {
    /* Number of actively-playing (non-corked) streams on this sink.
     * NOTE(review): the 'unsigned ret;' declaration appears elided in this
     * listing. */
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    ret = pa_idxset_size(s->inputs);
    pa_assert(ret >= s->n_corked);

    /* Streams connected to our monitor source do not matter for
     * pa_sink_used_by()!.*/

    return ret - s->n_corked;
/* Called from main thread */
unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
    /* Counts the streams that keep this sink busy (i.e. that inhibit
     * auto-suspend), skipping 'ignore_input'/'ignore_output'.
     * NOTE(review): local declarations ('ret', 'i', 'idx'), the
     * 'continue;'/'ret++;' lines and the return statements appear elided
     * in this listing. */
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (!PA_SINK_IS_LINKED(s->state))

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i == ignore_input)

        /* We do not assert here. It is perfectly valid for a sink input to
         * be in the INIT state (i.e. created, marked done but not yet put)
         * and we should not care if it's unlinked as it won't contribute
         * towards our busy status. */
        if (!PA_SINK_INPUT_IS_LINKED(i->state))

        if (i->state == PA_SINK_INPUT_CORKED)

        if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)

    /* Streams on our monitor source count as well. */
    if (s->monitor_source)
        ret += pa_source_check_suspend(s->monitor_source, ignore_output);
const char *pa_sink_state_to_string(pa_sink_state_t state) {
    /* Map a pa_sink_state_t to a human-readable name (used for logging).
     * NOTE(review): the 'switch (state) {' line and closing braces appear
     * elided in this listing. */
        case PA_SINK_INIT: return "INIT";
        case PA_SINK_IDLE: return "IDLE";
        case PA_SINK_RUNNING: return "RUNNING";
        case PA_SINK_SUSPENDED: return "SUSPENDED";
        case PA_SINK_UNLINKED: return "UNLINKED";
        case PA_SINK_INVALID_STATE: return "INVALID_STATE";

    /* All enum values are handled above. */
    pa_assert_not_reached();
/* Called from the IO thread */
static void sync_input_volumes_within_thread(pa_sink *s) {
    /* Copy each input's main-thread soft volume into its thread_info copy,
     * requesting a rewind so the new volume also applies to already-queued
     * audio. NOTE(review): iterator declarations and a 'continue;' appear
     * elided in this listing. */
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))

        i->thread_info.soft_volume = i->soft_volume;
        pa_sink_input_request_rewind(i, 0, true, false, false);
/* Called from the IO thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
static void set_shared_volume_within_thread(pa_sink *s) {
    pa_sink_input *i = NULL;
    /* NOTE(review): a 'void *state' iterator declaration appears elided. */

    pa_sink_assert_ref(s);

    /* Apply our own volume synchronously in-thread... */
    PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);

    /* ...then recurse into every filter sink that shares our volume. */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
            set_shared_volume_within_thread(i->origin_sink);
/* Called from IO thread, except when it is not */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    /* Central dispatcher for messages sent to the sink, normally running in
     * the sink's IO thread (UPDATE_VOLUME_AND_MUTE is the exception and is
     * handled in the main thread).
     * NOTE(review): this listing appears to have lines elided throughout --
     * closing braces, 'return 0;'/'return -1;' statements, some local
     * declarations and comment terminators; the obvious spots are flagged
     * below. */
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Splice into the synchronized-stream list, if any.
             * NOTE(review): closing braces of these two 'if' blocks appear elided. */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;

            pa_sink_input_attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be fixed up and
             * then configured on the sink. If this causes the sink latency to
             * go down, the sink implementor is responsible for doing a rewind
             * in the update_requested_latency() callback to ensure that the
             * sink buffer doesn't contain more data than what the new latency
             * allows.
             *
             * XXX: Does it really make sense to push this responsibility to
             * the sink implementors? Wouldn't it be better to do it once in
             * the core than many times in the modules? */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming.
             *
             * XXX: Does it really make sense to push this functionality to
             * the sink implementors? Wouldn't it be better to do it once in
             * the core than many times in the modules? */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            pa_sink_input_detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unsplice from the synchronized-stream list, if any. */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;

            pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
            pa_sink_invalidate_requested_latency(s, true);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            /* NOTE(review): a 'pa_usec_t usec;' declaration appears elided here. */
            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                size_t sink_nbytes, total_nbytes;

                /* The old sink probably has some audio from this
                 * stream in its buffer. We want to "take it back" as
                 * much as possible and play it to the new sink. We
                 * don't know at this point how much the old sink can
                 * rewind. We have to pick something, and that
                 * something is the full latency of the old sink here.
                 * So we rewind the stream buffer by the sink latency
                 * amount, which may be more than what we should
                 * rewind. This can result in a chunk of audio being
                 * played both to the old sink and the new sink.
                 *
                 * FIXME: Fix this code so that we don't have to make
                 * guesses about how much the sink will actually be
                 * able to rewind. If someone comes up with a solution
                 * for this, something to note is that the part of the
                 * latency that the old sink couldn't rewind should
                 * ideally be compensated after the stream has moved
                 * to the new sink by adding silence. The new sink
                 * most likely can't start playing the moved stream
                 * immediately, and that gap should be removed from
                 * the "compensation silence" (at least at the time of
                 * writing this, the move finish code will actually
                 * already take care of dropping the new sink's
                 * unrewindable latency, so taking into account the
                 * unrewindable latency of the old sink is the only
                 * problem).
                 *
                 * The render_memblockq contents are discarded,
                 * because when the sink changes, the format of the
                 * audio stored in the render_memblockq may change
                 * too, making the stored audio invalid. FIXME:
                 * However, the read and write indices are moved back
                 * the same amount, so if they are not the same now,
                 * they won't be the same after the rewind either. If
                 * the write index of the render_memblockq is ahead of
                 * the read index, then the render_memblockq will feed
                 * the new sink some silence first, which it shouldn't
                 * do. The write index should be flushed to be the
                 * same as the read index. */

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s, false);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = true;
                    pa_sink_input_process_rewind(i, sink_nbytes);

            pa_sink_input_detach(i);

            /* Let's remove the sink input ...*/
            pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));

            pa_sink_invalidate_requested_latency(s, true);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_sink_input_attach(i);

            /* NOTE(review): 'pa_usec_t usec;' / 'size_t nbytes;' declarations
             * appear elided here. */
            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {

                /* In the ideal case the new sink would start playing
                 * the stream immediately. That requires the sink to
                 * be able to rewind all of its latency, which usually
                 * isn't possible, so there will probably be some gap
                 * before the moved stream becomes audible. We then
                 * have two possibilities: 1) start playing the stream
                 * from where it is now, or 2) drop the unrewindable
                 * latency of the sink from the stream. With option 1
                 * we won't lose any audio but the stream will have a
                 * pause. With option 2 we may lose some audio but the
                 * stream time will be somewhat in sync with the wall
                 * clock. Lennart seems to have chosen option 2 (one
                 * of the reasons might have been that option 1 is
                 * actually much harder to implement), so we drop the
                 * latency of the new sink from the moved stream and
                 * hope that the sink will undo most of that in the
                 * future. */

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s, false);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);

            /* Updating the requested sink latency has to be done
             * after the sink rewind request, not before, because
             * otherwise the sink may limit the rewind amount
             * needlessly. */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);

        case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
            /* Always apply shared volume starting at the root of the
             * volume-sharing tree. */
            pa_sink *root_sink = pa_sink_get_master(s);

            if (PA_LIKELY(root_sink))
                set_shared_volume_within_thread(root_sink);

        case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:

            /* Deferred-volume sinks queue the hardware volume change. */
            if (s->flags & PA_SINK_DEFERRED_VOLUME) {
                pa_sink_volume_change_push(s);

            /* Fall through ... */

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);

        case PA_SINK_MESSAGE_GET_VOLUME:

            if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
                /* Flush pending deferred changes before reading hw volume,
                 * then derive the soft-volume remainder. */
                pa_sink_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);

            /* In case sink implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);

            /* NOTE(review): the s->set_mute(s) invocation appears elided here. */
            if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)

        case PA_SINK_MESSAGE_GET_MUTE:

            if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
                return s->get_mute(s, userdata);

        case PA_SINK_MESSAGE_SET_STATE: {
            struct set_state_data *data = userdata;
            bool suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(data->state)) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && data->state == PA_SINK_SUSPENDED);

            /* Give the implementor a chance to veto/prepare the state change.
             * NOTE(review): the 'int r;' declaration and the error return
             * appear elided here. */
            if (s->set_state_in_io_thread) {

                if ((r = s->set_state_in_io_thread(s, data->state, data->suspend_cause)) < 0)

            s->thread_info.state = data->state;

            /* A suspended sink cannot keep a pending rewind. */
            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = false;

            /* Notify inputs when we cross the suspended boundary.
             * NOTE(review): 'pa_sink_input *i; void *state = NULL;'
             * declarations appear elided. */
            if (suspend_change) {

                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);

        case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            /* Make sure we're not messing with main thread when no longer linked */
            if (!PA_SINK_IS_LINKED(s->state))

            pa_sink_get_volume(s, true);
            pa_sink_get_mute(s, true);

        case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
            s->thread_info.port_latency_offset = offset;

        /* Not handled here; handled by implementors or invalid. */
        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
/* Called from main thread */
int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
    /* Suspend or resume every sink for the given cause, continuing past
     * individual failures. NOTE(review): local declarations ('ret', 'r',
     * 'sink', 'idx') and the return statements appear elided in this
     * listing. */
    pa_core_assert_ref(c);
    pa_assert_ctl_context();
    pa_assert(cause != 0);

    PA_IDXSET_FOREACH(sink, c->sinks, idx) {

        if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
/* Called from IO thread */
void pa_sink_detach_within_thread(pa_sink *s) {
    /* Detach all inputs and our monitor source from the IO thread.
     * NOTE(review): iterator declarations appear elided in this listing. */
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        pa_sink_input_detach(i);

    if (s->monitor_source)
        pa_source_detach_within_thread(s->monitor_source);
/* Called from IO thread */
void pa_sink_attach_within_thread(pa_sink *s) {
    /* Re-attach all inputs and our monitor source to the IO thread
     * (counterpart of pa_sink_detach_within_thread()).
     * NOTE(review): iterator declarations appear elided in this listing. */
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        pa_sink_input_attach(i);

    if (s->monitor_source)
        pa_source_attach_within_thread(s->monitor_source);
/* Called from IO thread */
void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* (size_t) -1 means "rewind as much as possible". */
    if (nbytes == (size_t) -1)
        nbytes = s->thread_info.max_rewind;

    /* Never request more than the sink can actually rewind. */
    nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);

    /* A pending request already covering this amount needs no update.
     * NOTE(review): the 'return;' appears elided in this listing. */
    if (s->thread_info.rewind_requested &&
        nbytes <= s->thread_info.rewind_nbytes)

    s->thread_info.rewind_nbytes = nbytes;
    s->thread_info.rewind_requested = true;

    /* Let the implementor act on the rewind request immediately. */
    if (s->request_rewind)
        s->request_rewind(s);
/* Called from IO thread */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_usec_t monitor_latency;
    /* NOTE(review): sink-input iterator declarations and the final
     * 'return result;' appear elided in this listing. */

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Fixed-latency sinks simply report their (clamped) fixed latency. */
    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    /* Use the cached value while it is still valid. */
    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Take the minimum over all inputs' requested latencies... */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* ...also considering the monitor source's requested latency. */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = true;
/* Called from main thread */
pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
    /* NOTE(review): the 'pa_usec_t usec' declaration and both return
     * statements appear elided in this listing. */
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* A suspended sink has no meaningful requested latency. */
    if (s->state == PA_SINK_SUSPENDED)

    /* Ask the IO thread synchronously. */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
/* Called from IO as well as the main thread -- the latter only before the IO thread started up */
void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
    /* NOTE(review): iterator declarations and an early 'return;' appear
     * elided in this listing. */
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* No change: nothing to propagate. */
    if (max_rewind == s->thread_info.max_rewind)

    s->thread_info.max_rewind = max_rewind;

    /* Propagate the new limit to all attached inputs... */
    if (PA_SINK_IS_LINKED(s->thread_info.state))
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);

    /* ...and to the monitor source. */
    if (s->monitor_source)
        pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
/* Called from main thread */
void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Linked: marshal to the IO thread; otherwise (IO thread not started
     * yet) set directly. NOTE(review): the 'else' appears elided. */
    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);

    pa_sink_set_max_rewind_within_thread(s, max_rewind);
/* Called from IO as well as the main thread -- the latter only before the IO thread started up */
void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
    /* NOTE(review): iterator declarations and an early 'return;' appear
     * elided in this listing. */
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* No change: nothing to propagate. */
    if (max_request == s->thread_info.max_request)

    s->thread_info.max_request = max_request;

    /* Propagate the new limit to all attached inputs. */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            pa_sink_input_update_max_request(i, s->thread_info.max_request);
/* Called from main thread */
void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Linked: marshal to the IO thread; otherwise (IO thread not started
     * yet) set directly. NOTE(review): the 'else' appears elided. */
    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);

    pa_sink_set_max_request_within_thread(s, max_request);
/* Called from IO thread */
void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
    /* Drop the cached requested latency and notify interested parties.
     * NOTE(review): iterator declarations and control lines apparently
     * involving the 'dynamic' parameter appear elided in this listing. */
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Only dynamic-latency sinks cache a requested latency to invalidate. */
    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = false;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        /* Let the implementor react to the new (unknown) requested latency. */
        if (s->update_requested_latency)
            s->update_requested_latency(s);

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
/* Called from main thread */
void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* min_latency == 0: no limit
     * min_latency anything else: specified limit
     *
     * Similar for max_latency */

    /* Clamp both ends into the absolute allowed range. */
    if (min_latency < ABSOLUTE_MIN_LATENCY)
        min_latency = ABSOLUTE_MIN_LATENCY;

    if (max_latency <= 0 ||
        max_latency > ABSOLUTE_MAX_LATENCY)
        max_latency = ABSOLUTE_MAX_LATENCY;

    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    /* Linked: marshal the range to the IO thread; otherwise set directly.
     * NOTE(review): the 'pa_usec_t r[2]' declaration/initialization and the
     * 'else' appear elided in this listing. */
    if (PA_SINK_IS_LINKED(s->state)) {

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);

    pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3335 /* Called from main thread */
3336 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3337 pa_sink_assert_ref(s);
3338 pa_assert_ctl_context();
3339 pa_assert(min_latency);
3340 pa_assert(max_latency);
3342 if (PA_SINK_IS_LINKED(s->state)) {
3343 pa_usec_t r[2] = { 0, 0 };
3345 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3347 *min_latency = r[0];
3348 *max_latency = r[1];
3350 *min_latency = s->thread_info.min_latency;
3351 *max_latency = s->thread_info.max_latency;
3355 /* Called from IO thread */
3356 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3357 pa_sink_assert_ref(s);
3358 pa_sink_assert_io_context(s);
3360 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3361 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3362 pa_assert(min_latency <= max_latency);
3364 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3365 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3366 max_latency == ABSOLUTE_MAX_LATENCY) ||
3367 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3369 if (s->thread_info.min_latency == min_latency &&
3370 s->thread_info.max_latency == max_latency)
3373 s->thread_info.min_latency = min_latency;
3374 s->thread_info.max_latency = max_latency;
3376 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3380 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3381 if (i->update_sink_latency_range)
3382 i->update_sink_latency_range(i);
3385 pa_sink_invalidate_requested_latency(s, false);
3387 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3390 /* Called from main thread */
3391 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3392 pa_sink_assert_ref(s);
3393 pa_assert_ctl_context();
3395 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3396 pa_assert(latency == 0);
3400 if (latency < ABSOLUTE_MIN_LATENCY)
3401 latency = ABSOLUTE_MIN_LATENCY;
3403 if (latency > ABSOLUTE_MAX_LATENCY)
3404 latency = ABSOLUTE_MAX_LATENCY;
3406 if (PA_SINK_IS_LINKED(s->state))
3407 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3409 s->thread_info.fixed_latency = latency;
3411 pa_source_set_fixed_latency(s->monitor_source, latency);
3414 /* Called from main thread */
3415 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3418 pa_sink_assert_ref(s);
3419 pa_assert_ctl_context();
3421 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3424 if (PA_SINK_IS_LINKED(s->state))
3425 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3427 latency = s->thread_info.fixed_latency;
3432 /* Called from IO thread */
3433 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3434 pa_sink_assert_ref(s);
3435 pa_sink_assert_io_context(s);
3437 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3438 pa_assert(latency == 0);
3439 s->thread_info.fixed_latency = 0;
3441 if (s->monitor_source)
3442 pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
3447 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3448 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3450 if (s->thread_info.fixed_latency == latency)
3453 s->thread_info.fixed_latency = latency;
3455 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3459 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3460 if (i->update_sink_fixed_latency)
3461 i->update_sink_fixed_latency(i);
3464 pa_sink_invalidate_requested_latency(s, false);
3466 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3469 /* Called from main context */
3470 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3471 pa_sink_assert_ref(s);
3473 s->port_latency_offset = offset;
3475 if (PA_SINK_IS_LINKED(s->state))
3476 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3478 s->thread_info.port_latency_offset = offset;
3480 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3483 /* Called from main context */
3484 size_t pa_sink_get_max_rewind(pa_sink *s) {
3486 pa_assert_ctl_context();
3487 pa_sink_assert_ref(s);
3489 if (!PA_SINK_IS_LINKED(s->state))
3490 return s->thread_info.max_rewind;
3492 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3497 /* Called from main context */
3498 size_t pa_sink_get_max_request(pa_sink *s) {
3500 pa_sink_assert_ref(s);
3501 pa_assert_ctl_context();
3503 if (!PA_SINK_IS_LINKED(s->state))
3504 return s->thread_info.max_request;
3506 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3511 /* Called from main context */
3512 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3513 pa_device_port *port;
3515 pa_sink_assert_ref(s);
3516 pa_assert_ctl_context();
3519 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3520 return -PA_ERR_NOTIMPLEMENTED;
3524 return -PA_ERR_NOENTITY;
3526 if (!(port = pa_hashmap_get(s->ports, name)))
3527 return -PA_ERR_NOENTITY;
3529 if (s->active_port == port) {
3530 s->save_port = s->save_port || save;
3534 if (s->set_port(s, port) < 0)
3535 return -PA_ERR_NOENTITY;
3537 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3539 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3541 s->active_port = port;
3542 s->save_port = save;
3544 pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3546 /* The active port affects the default sink selection. */
3547 pa_core_update_default_sink(s->core);
3549 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
3554 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3555 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3559 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3562 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3564 if (pa_streq(ff, "microphone"))
3565 t = "audio-input-microphone";
3566 else if (pa_streq(ff, "webcam"))
3568 else if (pa_streq(ff, "computer"))
3570 else if (pa_streq(ff, "handset"))
3572 else if (pa_streq(ff, "portable"))
3573 t = "multimedia-player";
3574 else if (pa_streq(ff, "tv"))
3575 t = "video-display";
3578 * The following icons are not part of the icon naming spec,
3579 * because Rodney Dawes sucks as the maintainer of that spec.
3581 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3583 else if (pa_streq(ff, "headset"))
3584 t = "audio-headset";
3585 else if (pa_streq(ff, "headphone"))
3586 t = "audio-headphones";
3587 else if (pa_streq(ff, "speaker"))
3588 t = "audio-speakers";
3589 else if (pa_streq(ff, "hands-free"))
3590 t = "audio-handsfree";
3594 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3595 if (pa_streq(c, "modem"))
3602 t = "audio-input-microphone";
3605 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3606 if (strstr(profile, "analog"))
3608 else if (strstr(profile, "iec958"))
3610 else if (strstr(profile, "hdmi"))
3614 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3616 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3621 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3622 const char *s, *d = NULL, *k;
3625 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3629 if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3633 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3634 if (pa_streq(s, "internal"))
3635 d = _("Built-in Audio");
3638 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3639 if (pa_streq(s, "modem"))
3643 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3648 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3651 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3653 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3658 bool pa_device_init_intended_roles(pa_proplist *p) {
3662 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3665 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3666 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3667 || pa_streq(s, "headset")) {
3668 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3675 unsigned pa_device_init_priority(pa_proplist *p) {
3677 unsigned priority = 0;
3681 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3683 if (pa_streq(s, "sound"))
3685 else if (!pa_streq(s, "modem"))
3689 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3691 if (pa_streq(s, "headphone"))
3693 else if (pa_streq(s, "hifi"))
3695 else if (pa_streq(s, "speaker"))
3697 else if (pa_streq(s, "portable"))
3701 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3703 if (pa_streq(s, "bluetooth"))
3705 else if (pa_streq(s, "usb"))
3707 else if (pa_streq(s, "pci"))
3711 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3713 if (pa_startswith(s, "analog-"))
3715 else if (pa_startswith(s, "iec958-"))
3722 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3724 /* Called from the IO thread. */
3725 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3726 pa_sink_volume_change *c;
3727 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3728 c = pa_xnew(pa_sink_volume_change, 1);
3730 PA_LLIST_INIT(pa_sink_volume_change, c);
3732 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3736 /* Called from the IO thread. */
3737 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3739 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
/* Called from the IO thread. */
/* Queues a h/w volume change so that it takes effect when the audio that
 * is currently buffered actually reaches the device, i.e. one sink latency
 * (plus a configurable extra delay) from now. Up-changes are nudged later
 * and down-changes earlier by a safety margin, to avoid audible glitches.
 *
 * NOTE(review): this excerpt has lost lines in extraction (closing braces
 * and the "direction" string assignments are missing); the comments below
 * describe only what the visible code shows. */
void pa_sink_volume_change_push(pa_sink *s) {
pa_sink_volume_change *c = NULL;      /* cursor while scanning the queue */
pa_sink_volume_change *nc = NULL;     /* the newly queued change */
pa_sink_volume_change *pc = NULL;     /* scratch pointer for safe iteration */
uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
const char *direction = NULL;         /* "up"/"down", for the debug log */
nc = pa_sink_volume_change_new(s);
/* NOTE: There is already more different volumes in pa_sink that I can remember.
 * Adding one more volume for HW would get us rid of this, but I am trying
 * to survive with the ones we already have. */
/* The h/w volume to write is the real volume with the s/w part divided out */
pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
/* Nothing pending and the h/w volume already matches -> nothing to do */
if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
pa_log_debug("Volume not changing");
pa_sink_volume_change_free(nc);
/* Schedule the event one full sink latency plus extra delay from now */
nc->at = pa_sink_get_latency_within_thread(s, false);
nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
if (s->thread_info.volume_changes_tail) {
/* Scan backwards from the tail for the insertion point */
for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
/* If volume is going up let's do it a bit late. If it is going
 * down let's do it a bit early. */
if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
if (nc->at + safety_margin > c->at) {
nc->at += safety_margin;
else if (nc->at - safety_margin > c->at) {
nc->at -= safety_margin;
/* Empty queue (or no anchor found): compare against the current h/w
 * volume to decide whether to shift the event later (up) or earlier (down) */
if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
nc->at += safety_margin;
nc->at -= safety_margin;
PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
/* We can ignore volume events that came earlier but should happen later than this. */
PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
pa_sink_volume_change_free(c);
s->thread_info.volume_changes_tail = nc;
3813 /* Called from the IO thread. */
3814 static void pa_sink_volume_change_flush(pa_sink *s) {
3815 pa_sink_volume_change *c = s->thread_info.volume_changes;
3817 s->thread_info.volume_changes = NULL;
3818 s->thread_info.volume_changes_tail = NULL;
3820 pa_sink_volume_change *next = c->next;
3821 pa_sink_volume_change_free(c);
/* Called from the IO thread. */
/* Writes every queued h/w volume change whose deadline has been reached to
 * the device (via s->write_volume) and, through *usec_to_next, reports how
 * long until the next pending change is due (0 if none).
 *
 * NOTE(review): this excerpt has lost lines in extraction (local variable
 * declarations, the return-value bookkeeping and several braces); the
 * comments below describe only what the visible code shows. */
bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
/* Nothing queued, or the sink is not linked: nothing to apply */
if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
/* Deferred volume writes require a write_volume() implementation */
pa_assert(s->write_volume);
now = pa_rtclock_now();
/* Pop and apply every event whose timestamp has already passed */
while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
pa_sink_volume_change *c = s->thread_info.volume_changes;
PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
pa_log_debug("Volume change to %d at %llu was written %llu usec late",
pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
/* Remember what is now set in hardware */
s->thread_info.current_hw_volume = c->hw_volume;
pa_sink_volume_change_free(c);
/* Still something queued: report time until the next event */
if (s->thread_info.volume_changes) {
*usec_to_next = s->thread_info.volume_changes->at - now;
if (pa_log_ratelimit(PA_LOG_DEBUG))
pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
/* Queue drained: the tail pointer must be reset too */
s->thread_info.volume_changes_tail = NULL;
/* Called from the IO thread. */
static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
/* All the queued volume events later than current latency are shifted to happen earlier. */
pa_sink_volume_change *c;
pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
/* Duration of the rewound audio.
 * NOTE(review): `rewound` is not referenced in the visible lines -- the
 * statement that uses it (likely `c->at -= rewound;`) appears to have
 * been lost in extraction; compare against upstream sink.c. */
pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
pa_log_debug("latency = %lld", (long long) limit);
/* Turn the latency into an absolute wall-clock deadline */
limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
pa_usec_t modified_limit = limit;
/* Apply the same up-late/down-early safety margin used when queuing */
if (prev_vol > pa_cvolume_avg(&c->hw_volume))
modified_limit -= s->thread_info.volume_change_safety_margin;
modified_limit += s->thread_info.volume_change_safety_margin;
if (c->at > modified_limit) {
/* Never shift an event to before the (margin-adjusted) deadline */
if (c->at < modified_limit)
c->at = modified_limit;
prev_vol = pa_cvolume_avg(&c->hw_volume);
/* Apply immediately any events the shift has made due */
pa_sink_volume_change_apply(s, NULL);
3897 /* Called from the main thread */
3898 /* Gets the list of formats supported by the sink. The members and idxset must
3899 * be freed by the caller. */
3900 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3905 if (s->get_formats) {
3906 /* Sink supports format query, all is good */
3907 ret = s->get_formats(s);
3909 /* Sink doesn't support format query, so assume it does PCM */
3910 pa_format_info *f = pa_format_info_new();
3911 f->encoding = PA_ENCODING_PCM;
3913 ret = pa_idxset_new(NULL, NULL);
3914 pa_idxset_put(ret, f, NULL);
3920 /* Called from the main thread */
3921 /* Allows an external source to set what formats a sink supports if the sink
3922 * permits this. The function makes a copy of the formats on success. */
3923 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3928 /* Sink supports setting formats -- let's give it a shot */
3929 return s->set_formats(s, formats);
3931 /* Sink doesn't support setting this -- bail out */
3935 /* Called from the main thread */
3936 /* Checks if the sink can accept this format */
3937 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3938 pa_idxset *formats = NULL;
3944 formats = pa_sink_get_formats(s);
3947 pa_format_info *finfo_device;
3950 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3951 if (pa_format_info_is_compatible(finfo_device, f)) {
3957 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3963 /* Called from the main thread */
3964 /* Calculates the intersection between formats supported by the sink and
3965 * in_formats, and returns these, in the order of the sink's formats. */
3966 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3967 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3968 pa_format_info *f_sink, *f_in;
3973 if (!in_formats || pa_idxset_isempty(in_formats))
3976 sink_formats = pa_sink_get_formats(s);
3978 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3979 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3980 if (pa_format_info_is_compatible(f_sink, f_in))
3981 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3987 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3992 /* Called from the main thread */
3993 void pa_sink_set_sample_format(pa_sink *s, pa_sample_format_t format) {
3994 pa_sample_format_t old_format;
3997 pa_assert(pa_sample_format_valid(format));
3999 old_format = s->sample_spec.format;
4000 if (old_format == format)
4003 pa_log_info("%s: format: %s -> %s",
4004 s->name, pa_sample_format_to_string(old_format), pa_sample_format_to_string(format));
4006 s->sample_spec.format = format;
4008 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4011 /* Called from the main thread */
4012 void pa_sink_set_sample_rate(pa_sink *s, uint32_t rate) {
4016 pa_assert(pa_sample_rate_valid(rate));
4018 old_rate = s->sample_spec.rate;
4019 if (old_rate == rate)
4022 pa_log_info("%s: rate: %u -> %u", s->name, old_rate, rate);
4024 s->sample_spec.rate = rate;
4026 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4029 /* Called from the main thread. */
4030 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
4031 pa_cvolume old_volume;
4032 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4033 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4038 old_volume = s->reference_volume;
4040 if (pa_cvolume_equal(volume, &old_volume))
4043 s->reference_volume = *volume;
4044 pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
4045 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
4046 s->flags & PA_SINK_DECIBEL_VOLUME),
4047 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
4048 s->flags & PA_SINK_DECIBEL_VOLUME));
4050 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4051 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);