2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
32 #include <pulse/introspect.h>
33 #include <pulse/format.h>
34 #include <pulse/utf8.h>
35 #include <pulse/xmalloc.h>
36 #include <pulse/timeval.h>
37 #include <pulse/util.h>
38 #include <pulse/rtclock.h>
39 #include <pulse/internal.h>
41 #include <pulsecore/i18n.h>
42 #include <pulsecore/sink-input.h>
43 #include <pulsecore/namereg.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/mix.h>
47 #include <pulsecore/core-subscribe.h>
48 #include <pulsecore/log.h>
49 #include <pulsecore/macro.h>
50 #include <pulsecore/play-memblockq.h>
51 #include <pulsecore/flist.h>
53 #include <pulsecore/proplist-util.h>
/* Mixing/latency tunables for the sink implementation.
 * NOTE(review): the latency constants are combined with PA_USEC_* macros
 * below, so they are presumably expressed in microseconds — confirm. */
58 #define MAX_MIX_CHANNELS 32
59 #define MIX_BUFFER_LENGTH (pa_page_size())
60 #define ABSOLUTE_MIN_LATENCY (500)
61 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
62 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)

/* Reference-counting/class boilerplate making pa_sink a public pa_msgobject subclass. */
64 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
/* Record of one pending deferred ("write through") hardware volume change,
 * kept in a linked list per sink (see pa_sink_volume_change_* below).
 * NOTE(review): this extract is missing lines (original numbering jumps),
 * so the struct bodies and closing braces below are incomplete as shown. */
66 struct pa_sink_volume_change {
70 PA_LLIST_FIELDS(pa_sink_volume_change);

/* Payload for the SET_STATE message handed to the IO thread by sink_set_state(). */
73 struct set_state_data {
74 pa_sink_state_t state;
75 pa_suspend_cause_t suspend_cause;

/* Forward declarations for functions defined later in this file. */
78 static void sink_free(pa_object *s);
80 static void pa_sink_volume_change_push(pa_sink *s);
81 static void pa_sink_volume_change_flush(pa_sink *s);
82 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/* TIZEN_PCM_DUMP debugging helper: streams the sink's rendered PCM into a
 * .raw file. Lazily opens the dump file when PA_PCM_DUMP_SINK is enabled and
 * the sink is RUNNING, closes it again when the dump flag is cleared, and
 * appends each rendered memchunk while the file is open.
 * NOTE(review): this extract is missing lines (original numbering jumps):
 * declarations of `now`, `tm`, `datetime` and `ptr`, several else/closing
 * braces, and the pa_xfree(dump_time) around old line 122 are not visible. */
85 static void pa_sink_write_pcm_dump(pa_sink *s, pa_memchunk *chunk)
87 char *dump_time = NULL, *dump_path_surfix = NULL;
88 const char *s_device_api_str, *card_name_str, *device_idx_str;
93 /* open file for dump pcm */
94 if (s->core->pcm_dump & PA_PCM_DUMP_SINK && !s->pcm_dump_fp && s->state == PA_SINK_RUNNING) {
/* Timestamp (HHMMSS.mmm) used to make the dump filename unique. */
95 pa_gettimeofday(&now);
96 localtime_r(&now.tv_sec, &tm);
97 memset(&datetime[0], 0x00, sizeof(datetime));
98 strftime(&datetime[0], sizeof(datetime), "%H%M%S", &tm);
99 dump_time = pa_sprintf_malloc("%s.%03ld", &datetime[0], now.tv_usec / 1000);
/* Filename suffix: "<card>.<device>" for ALSA sinks, the device API name
 * otherwise, falling back to the sink name when no API property is set. */
101 if ((s_device_api_str = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_API))) {
102 if (pa_streq(s_device_api_str, "alsa")) {
103 card_name_str = pa_proplist_gets(s->proplist, "alsa.card_name");
104 device_idx_str = pa_proplist_gets(s->proplist, "alsa.device");
105 dump_path_surfix = pa_sprintf_malloc("%s.%s", pa_strnull(card_name_str), pa_strnull(device_idx_str));
107 dump_path_surfix = pa_sprintf_malloc("%s", s_device_api_str);
110 dump_path_surfix = pa_sprintf_malloc("%s", s->name);
113 s->dump_path = pa_sprintf_malloc("%s_%s_pa-sink%d-%s_%dch_%d.raw", PA_PCM_DUMP_PATH_PREFIX, pa_strempty(dump_time),
114 s->index, pa_strempty(dump_path_surfix), s->sample_spec.channels, s->sample_spec.rate);
116 s->pcm_dump_fp = fopen(s->dump_path, "w");
118 pa_log_warn("%s open failed", s->dump_path);
120 pa_log_info("%s opened", s->dump_path);
123 pa_xfree(dump_path_surfix);
124 /* close file for dump pcm when config is changed */
125 } else if (~s->core->pcm_dump & PA_PCM_DUMP_SINK && s->pcm_dump_fp) {
126 fclose(s->pcm_dump_fp);
127 pa_log_info("%s closed", s->dump_path);
128 pa_xfree(s->dump_path);
129 s->pcm_dump_fp = NULL;
/* Append the rendered chunk to the open dump file. fwrite result is
 * deliberately ignored (best-effort debug dump). */
133 if (s->pcm_dump_fp) {
136 ptr = pa_memblock_acquire(chunk->memblock);
138 fwrite((uint8_t *)ptr + chunk->index, 1, chunk->length, s->pcm_dump_fp);
140 pa_log_warn("pa_memblock_acquire is failed. ptr is NULL");
142 pa_memblock_release(chunk->memblock);
147 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
151 data->proplist = pa_proplist_new();
152 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
157 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
160 pa_xfree(data->name);
161 data->name = pa_xstrdup(name);
164 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
167 if ((data->sample_spec_is_set = !!spec))
168 data->sample_spec = *spec;
171 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
174 if ((data->channel_map_is_set = !!map))
175 data->channel_map = *map;
178 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
181 data->alternate_sample_rate_is_set = true;
182 data->alternate_sample_rate = alternate_sample_rate;
185 void pa_sink_new_data_set_avoid_resampling(pa_sink_new_data *data, bool avoid_resampling) {
188 data->avoid_resampling_is_set = true;
189 data->avoid_resampling = avoid_resampling;
192 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
195 if ((data->volume_is_set = !!volume))
196 data->volume = *volume;
199 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
202 data->muted_is_set = true;
206 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
209 pa_xfree(data->active_port);
210 data->active_port = pa_xstrdup(port);
213 void pa_sink_new_data_done(pa_sink_new_data *data) {
216 pa_proplist_free(data->proplist);
219 pa_hashmap_free(data->ports);
221 pa_xfree(data->name);
222 pa_xfree(data->active_port);
/* Clear every implementor-supplied callback on the sink, returning it to a
 * "no backend behavior" state (used during init/teardown).
 * NOTE(review): this extract is missing lines (original numbering jumps) —
 * resets for e.g. get_mute/set_mute/set_port and the closing brace are not
 * visible here. */
225 /* Called from main context */
226 static void reset_callbacks(pa_sink *s) {
229 s->set_state_in_main_thread = NULL;
230 s->set_state_in_io_thread = NULL;
231 s->get_volume = NULL;
232 s->set_volume = NULL;
233 s->write_volume = NULL;
236 s->request_rewind = NULL;
237 s->update_requested_latency = NULL;
239 s->get_formats = NULL;
240 s->set_formats = NULL;
241 s->reconfigure = NULL;
/* Create a new sink object from the (already filled-in) new-data struct:
 * registers the name, fires the NEW/FIXATE hooks, validates and defaults the
 * sample spec / channel map / volume, copies everything into the pa_sink,
 * initializes thread_info, and creates the companion monitor source
 * ("<name>.monitor"). Returns NULL on validation/hook failure.
 * NOTE(review): this extract is missing lines (original numbering jumps) —
 * several declarations (s, name, pt, dn), `return NULL` paths, else branches
 * and closing braces are not visible below. */
244 /* Called from main context */
245 pa_sink* pa_sink_new(
247 pa_sink_new_data *data,
248 pa_sink_flags_t flags) {
252 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
253 pa_source_new_data source_data;
259 pa_assert(data->name);
260 pa_assert_ctl_context();
262 s = pa_msgobject_new(pa_sink);
264 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
265 pa_log_debug("Failed to register name %s.", data->name);
270 pa_sink_new_data_set_name(data, name);
272 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
274 pa_namereg_unregister(core, name);
278 /* FIXME, need to free s here on failure */
/* Validate the (possibly hook-modified) new-data before committing. */
280 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
281 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
283 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
285 if (!data->channel_map_is_set)
286 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
288 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
289 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
291 /* FIXME: There should probably be a general function for checking whether
292 * the sink volume is allowed to be set, like there is for sink inputs. */
293 pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
295 if (!data->volume_is_set) {
296 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
297 data->save_volume = false;
300 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
301 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
303 if (!data->muted_is_set)
307 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
309 pa_device_init_description(data->proplist, data->card);
310 pa_device_init_icon(data->proplist, true);
311 pa_device_init_intended_roles(data->proplist);
/* Pick the best port as default if the caller didn't choose one. */
313 if (!data->active_port) {
314 pa_device_port *p = pa_device_port_find_best(data->ports);
316 pa_sink_new_data_set_port(data, p->name);
319 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
321 pa_namereg_unregister(core, name);
/* Commit: copy the fixated new-data into the sink object. */
325 s->parent.parent.free = sink_free;
326 s->parent.process_msg = pa_sink_process_msg;
329 s->state = PA_SINK_INIT;
332 s->suspend_cause = data->suspend_cause;
333 s->name = pa_xstrdup(name);
334 s->proplist = pa_proplist_copy(data->proplist);
335 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
336 s->module = data->module;
337 s->card = data->card;
339 s->priority = pa_device_init_priority(s->proplist);
341 s->sample_spec = data->sample_spec;
342 s->channel_map = data->channel_map;
343 s->default_sample_rate = s->sample_spec.rate;
345 if (data->alternate_sample_rate_is_set)
346 s->alternate_sample_rate = data->alternate_sample_rate;
348 s->alternate_sample_rate = s->core->alternate_sample_rate;
350 if (data->avoid_resampling_is_set)
351 s->avoid_resampling = data->avoid_resampling;
353 s->avoid_resampling = s->core->avoid_resampling;
355 s->origin_avoid_resampling = data->avoid_resampling;
356 s->selected_sample_format = s->sample_spec.format;
357 s->selected_sample_rate = s->sample_spec.rate;
360 s->inputs = pa_idxset_new(NULL, NULL);
362 s->input_to_master = NULL;
364 s->reference_volume = s->real_volume = data->volume;
365 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
366 s->base_volume = PA_VOLUME_NORM;
367 s->n_volume_steps = PA_VOLUME_NORM+1;
368 s->muted = data->muted;
369 s->refresh_volume = s->refresh_muted = false;
376 /* As a minor optimization we just steal the list instead of
378 s->ports = data->ports;
381 s->active_port = NULL;
382 s->save_port = false;
384 if (data->active_port)
385 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
386 s->save_port = data->save_port;
388 /* Hopefully the active port has already been assigned in the previous call
389 to pa_device_port_find_best, but better safe than sorry */
391 s->active_port = pa_device_port_find_best(s->ports);
394 s->port_latency_offset = s->active_port->latency_offset;
396 s->port_latency_offset = 0;
398 s->save_volume = data->save_volume;
399 s->save_muted = data->save_muted;
400 #ifdef TIZEN_PCM_DUMP
401 s->pcm_dump_fp = NULL;
405 pa_silence_memchunk_get(
406 &core->silence_cache,
/* Initialize the IO-thread-owned mirror of the control-thread state. */
412 s->thread_info.rtpoll = NULL;
413 s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
414 (pa_free_cb_t) pa_sink_input_unref);
415 s->thread_info.soft_volume = s->soft_volume;
416 s->thread_info.soft_muted = s->muted;
417 s->thread_info.state = s->state;
418 s->thread_info.rewind_nbytes = 0;
419 s->thread_info.rewind_requested = false;
420 s->thread_info.max_rewind = 0;
421 s->thread_info.max_request = 0;
422 s->thread_info.requested_latency_valid = false;
423 s->thread_info.requested_latency = 0;
424 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
425 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
426 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
428 PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
429 s->thread_info.volume_changes_tail = NULL;
430 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
431 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
432 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
433 s->thread_info.port_latency_offset = s->port_latency_offset;
435 /* FIXME: This should probably be moved to pa_sink_put() */
436 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
439 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
441 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
442 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
445 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
446 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Create the companion monitor source mirroring this sink's format. */
450 pa_source_new_data_init(&source_data);
451 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
452 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
453 pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
454 pa_source_new_data_set_avoid_resampling(&source_data, s->avoid_resampling);
455 source_data.name = pa_sprintf_malloc("%s.monitor", name);
456 source_data.driver = data->driver;
457 source_data.module = data->module;
458 source_data.card = data->card;
460 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
461 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
462 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
464 s->monitor_source = pa_source_new(core, &source_data,
465 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
466 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
468 pa_source_new_data_done(&source_data);
470 if (!s->monitor_source) {
476 s->monitor_source->monitor_of = s;
478 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
479 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
480 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
/* Transition the sink to a new state and/or suspend cause: calls the
 * implementor's set_state_in_main_thread() hook, forwards a SET_STATE
 * message to the IO thread, and on success fires state-change hooks,
 * notifies subscribers, and tells all sink inputs (and the monitor source)
 * about suspend/resume. Resume failures fall back to SUSPENDED with cause 0.
 * NOTE(review): this extract is missing lines (original numbering jumps) —
 * declarations (ret, suspending, resuming, state_changed, i, idx), several
 * `return` statements, else branches and closing braces are not visible. */
485 /* Called from main context */
486 static int sink_set_state(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) {
489 bool suspend_cause_changed;
492 pa_sink_state_t old_state;
493 pa_suspend_cause_t old_suspend_cause;
496 pa_assert_ctl_context();
498 state_changed = state != s->state;
499 suspend_cause_changed = suspend_cause != s->suspend_cause;
501 if (!state_changed && !suspend_cause_changed)
504 suspending = PA_SINK_IS_OPENED(s->state) && state == PA_SINK_SUSPENDED;
505 resuming = s->state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state);
507 /* If we are resuming, suspend_cause must be 0. */
508 pa_assert(!resuming || !suspend_cause);
510 /* Here's something to think about: what to do with the suspend cause if
511 * resuming the sink fails? The old suspend cause will be incorrect, so we
512 * can't use that. On the other hand, if we set no suspend cause (as is the
513 * case currently), then it looks strange to have a sink suspended without
514 * any cause. It might be a good idea to add a new "resume failed" suspend
515 * cause, or it might just add unnecessary complexity, given that the
516 * current approach of not setting any suspend cause works well enough. */
518 if (s->set_state_in_main_thread) {
519 if ((ret = s->set_state_in_main_thread(s, state, suspend_cause)) < 0) {
520 /* set_state_in_main_thread() is allowed to fail only when resuming. */
523 /* If resuming fails, we set the state to SUSPENDED and
524 * suspend_cause to 0. */
525 state = PA_SINK_SUSPENDED;
527 state_changed = false;
528 suspend_cause_changed = suspend_cause != s->suspend_cause;
531 /* We know the state isn't changing. If the suspend cause isn't
532 * changing either, then there's nothing more to do. */
533 if (!suspend_cause_changed)
539 struct set_state_data data = { .state = state, .suspend_cause = suspend_cause };
541 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, &data, 0, NULL)) < 0) {
542 /* SET_STATE is allowed to fail only when resuming. */
/* Roll the main-thread callback back to SUSPENDED as well. */
545 if (s->set_state_in_main_thread)
546 s->set_state_in_main_thread(s, PA_SINK_SUSPENDED, 0);
548 /* If resuming fails, we set the state to SUSPENDED and
549 * suspend_cause to 0. */
550 state = PA_SINK_SUSPENDED;
552 state_changed = false;
553 suspend_cause_changed = suspend_cause != s->suspend_cause;
556 /* We know the state isn't changing. If the suspend cause isn't
557 * changing either, then there's nothing more to do. */
558 if (!suspend_cause_changed)
563 #ifdef TIZEN_PCM_DUMP
564 /* close file for dump pcm */
565 if (s->pcm_dump_fp && (s->core->pcm_dump & PA_PCM_DUMP_SEPARATED) && suspending) {
566 fclose(s->pcm_dump_fp);
567 pa_log_info("%s closed", s->dump_path);
568 pa_xfree(s->dump_path);
569 s->pcm_dump_fp = NULL;
572 old_suspend_cause = s->suspend_cause;
573 if (suspend_cause_changed) {
574 char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
575 char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
577 pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(s->suspend_cause, old_cause_buf),
578 pa_suspend_cause_to_string(suspend_cause, new_cause_buf));
579 s->suspend_cause = suspend_cause;
582 old_state = s->state;
584 pa_log_debug("%s: state: %s -> %s", s->name, pa_sink_state_to_string(s->state), pa_sink_state_to_string(state));
587 /* If we enter UNLINKED state, then we don't send change notifications.
588 * pa_sink_unlink() will send unlink notifications instead. */
589 if (state != PA_SINK_UNLINKED) {
590 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
591 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
595 if (suspending || resuming || suspend_cause_changed) {
599 /* We're suspending or resuming, tell everyone about it */
601 PA_IDXSET_FOREACH(i, s->inputs, idx)
602 if (s->state == PA_SINK_SUSPENDED &&
603 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
604 pa_sink_input_kill(i);
606 i->suspend(i, old_state, old_suspend_cause);
609 if ((suspending || resuming || suspend_cause_changed) && s->monitor_source && state != PA_SINK_UNLINKED)
610 pa_source_sync_suspend(s->monitor_source);
/* Install the implementor's get-volume callback.
 * NOTE(review): extract is missing this function's body and closing brace. */
615 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {

/* Install (or clear) the implementor's set-volume callback and toggle
 * PA_SINK_HW_VOLUME_CTRL accordingly; also re-evaluates decibel volume
 * support. Subscribers are notified if the flags change after init.
 * NOTE(review): extract is missing lines — the `s->set_volume = cb;` and
 * `flags = s->flags;` assignments, the if/else keywords and braces are not
 * visible here. */
621 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
622 pa_sink_flags_t flags;
625 pa_assert(!s->write_volume || cb);
629 /* Save the current flags so we can tell if they've changed */
633 /* The sink implementor is responsible for setting decibel volume support */
634 s->flags |= PA_SINK_HW_VOLUME_CTRL;
636 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
637 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
638 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
641 /* If the flags have changed after init, let any clients know via a change event */
642 if (s->state != PA_SINK_INIT && flags != s->flags)
643 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install (or clear) the implementor's deferred write-volume callback and
 * toggle PA_SINK_DEFERRED_VOLUME accordingly. A write_volume callback
 * requires a set_volume callback (asserted). Subscribers are notified if the
 * flags change after init.
 * NOTE(review): extract is missing lines — `flags = s->flags;`, the if/else
 * around the flag update, and the closing brace are not visible here. */
646 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
647 pa_sink_flags_t flags;
650 pa_assert(!cb || s->set_volume);
652 s->write_volume = cb;
654 /* Save the current flags so we can tell if they've changed */
658 s->flags |= PA_SINK_DEFERRED_VOLUME;
660 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
662 /* If the flags have changed after init, let any clients know via a change event */
663 if (s->state != PA_SINK_INIT && flags != s->flags)
664 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install the implementor's get-mute callback.
 * NOTE(review): extract is missing this function's body and closing brace. */
667 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {

/* Install (or clear) the implementor's set-mute callback and toggle
 * PA_SINK_HW_MUTE_CTRL accordingly. Subscribers are notified if the flags
 * change after init.
 * NOTE(review): extract is missing lines — `s->set_mute = cb;`,
 * `flags = s->flags;`, the if/else keywords and the closing brace are not
 * visible here. */
673 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
674 pa_sink_flags_t flags;
680 /* Save the current flags so we can tell if they've changed */
684 s->flags |= PA_SINK_HW_MUTE_CTRL;
686 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
688 /* If the flags have changed after init, let any clients know via a change event */
689 if (s->state != PA_SINK_INIT && flags != s->flags)
690 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable/disable PA_SINK_FLAT_VOLUME on this sink, gated by the global
 * flat_volumes preference. Subscribers are notified if the flags change
 * after init.
 * NOTE(review): extract is missing lines — `flags = s->flags;`, the if/else
 * keywords and the closing brace are not visible here. */
693 static void enable_flat_volume(pa_sink *s, bool enable) {
694 pa_sink_flags_t flags;
698 /* Always follow the overall user preference here */
699 enable = enable && s->core->flat_volumes;
701 /* Save the current flags so we can tell if they've changed */
705 s->flags |= PA_SINK_FLAT_VOLUME;
707 s->flags &= ~PA_SINK_FLAT_VOLUME;
709 /* If the flags have changed after init, let any clients know via a change event */
710 if (s->state != PA_SINK_INIT && flags != s->flags)
711 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Enable/disable PA_SINK_DECIBEL_VOLUME; flat volume is only meaningful with
 * decibel volumes, so it is enabled/disabled in lockstep. Subscribers are
 * notified if the flags change after init.
 * NOTE(review): extract is missing lines — `flags = s->flags;`, the if/else
 * keywords and the closing brace are not visible here. */
714 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
715 pa_sink_flags_t flags;
719 /* Save the current flags so we can tell if they've changed */
723 s->flags |= PA_SINK_DECIBEL_VOLUME;
724 enable_flat_volume(s, true);
726 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
727 enable_flat_volume(s, false);
730 /* If the flags have changed after init, let any clients know via a change event */
731 if (s->state != PA_SINK_INIT && flags != s->flags)
732 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Finish sink construction after the implementor configured it: validates
 * flag/callback consistency, settles volume flags (decibel/flat/shared),
 * moves to SUSPENDED or IDLE, puts the monitor source, fires SINK_PUT, and
 * updates the default sink / moves waiting streams.
 * NOTE(review): this extract is missing lines (original numbering jumps) —
 * some assertions, else branches and closing braces are not visible. */
735 /* Called from main context */
736 void pa_sink_put(pa_sink* s) {
737 pa_sink_assert_ref(s);
738 pa_assert_ctl_context();
740 pa_assert(s->state == PA_SINK_INIT);
741 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
743 /* The following fields must be initialized properly when calling _put() */
744 pa_assert(s->asyncmsgq);
745 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
747 /* Generally, flags should be initialized via pa_sink_new(). As a
748 * special exception we allow some volume related flags to be set
749 * between _new() and _put() by the callback setter functions above.
751 * Thus we implement a couple safeguards here which ensure the above
752 * setters were used (or at least the implementor made manual changes
753 * in a compatible way).
755 * Note: All of these flags set here can change over the life time
757 pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
758 pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
759 pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
761 /* XXX: Currently decibel volume is disabled for all sinks that use volume
762 * sharing. When the master sink supports decibel volume, it would be good
763 * to have the flag also in the filter sink, but currently we don't do that
764 * so that the flags of the filter sink never change when it's moved from
765 * a master sink to another. One solution for this problem would be to
766 * remove user-visible volume altogether from filter sinks when volume
767 * sharing is used, but the current approach was easier to implement... */
768 /* We always support decibel volumes in software, otherwise we leave it to
769 * the sink implementor to set this flag as needed.
771 * Note: This flag can also change over the life time of the sink. */
772 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
773 pa_sink_enable_decibel_volume(s, true);
774 s->soft_volume = s->reference_volume;
777 /* If the sink implementor support DB volumes by itself, we should always
778 * try and enable flat volumes too */
779 if ((s->flags & PA_SINK_DECIBEL_VOLUME))
780 enable_flat_volume(s, true);
782 if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
783 pa_sink *root_sink = pa_sink_get_master(s);
785 pa_assert(root_sink);
/* Shared-volume sinks inherit their volumes from the filter chain root,
 * remapped into this sink's channel map. */
787 s->reference_volume = root_sink->reference_volume;
788 pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
790 s->real_volume = root_sink->real_volume;
791 pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
793 /* We assume that if the sink implementor changed the default
794 * volume they did so in real_volume, because that is the usual
795 * place where they are supposed to place their changes. */
796 s->reference_volume = s->real_volume;
798 s->thread_info.soft_volume = s->soft_volume;
799 s->thread_info.soft_muted = s->muted;
800 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
802 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
803 || (s->base_volume == PA_VOLUME_NORM
804 && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
805 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
806 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
807 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
808 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
810 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
811 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
812 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
814 if (s->suspend_cause)
815 pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED, s->suspend_cause) == 0);
817 pa_assert_se(sink_set_state(s, PA_SINK_IDLE, 0) == 0);
819 pa_source_put(s->monitor_source);
821 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
822 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
824 /* It's good to fire the SINK_PUT hook before updating the default sink,
825 * because module-switch-on-connect will set the new sink as the default
826 * sink, and if we were to call pa_core_update_default_sink() before that,
827 * the default sink might change twice, causing unnecessary stream moving. */
829 pa_core_update_default_sink(s->core);
831 pa_core_move_streams_to_newly_available_preferred_sink(s->core, s);
/* Detach the sink from the core: fires UNLINK hooks, unregisters the name,
 * removes it from the core/card idxsets, rescues or kills its inputs, unlinks
 * the monitor source, and transitions to UNLINKED. Idempotent via
 * unlink_requested.
 * NOTE(review): this extract is missing lines (original numbering jumps) —
 * the `linked` declaration, early `return`, some conditionals and closing
 * braces are not visible. */
834 /* Called from main context */
835 void pa_sink_unlink(pa_sink* s) {
837 pa_sink_input *i, PA_UNUSED *j = NULL;
839 pa_sink_assert_ref(s);
840 pa_assert_ctl_context();
842 /* Please note that pa_sink_unlink() does more than simply
843 * reversing pa_sink_put(). It also undoes the registrations
844 * already done in pa_sink_new()! */
846 if (s->unlink_requested)
849 s->unlink_requested = true;
851 linked = PA_SINK_IS_LINKED(s->state);
854 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
856 if (s->state != PA_SINK_UNLINKED)
857 pa_namereg_unregister(s->core, s->name);
858 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
860 pa_core_update_default_sink(s->core);
862 if (linked && s->core->rescue_streams)
863 pa_sink_move_streams_to_default_sink(s->core, s, false);
866 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Any inputs still attached could not be rescued; kill them. */
868 while ((i = pa_idxset_first(s->inputs, NULL))) {
870 pa_sink_input_kill(i);
874 /* Unlink monitor source before unlinking the sink */
875 if (s->monitor_source)
876 pa_source_unlink(s->monitor_source);
879 /* It's important to keep the suspend cause unchanged when unlinking,
880 * because if we remove the SESSION suspend cause here, the alsa sink
881 * will sync its volume with the hardware while another user is
882 * active, messing up the volume for that other user. */
883 sink_set_state(s, PA_SINK_UNLINKED, s->suspend_cause);
885 s->state = PA_SINK_UNLINKED;
890 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
891 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
/* Destructor invoked when the last reference is dropped (via
 * parent.parent.free): releases the monitor source, input containers, silence
 * memblock, proplist, ports, and the Tizen PCM dump file.
 * NOTE(review): this extract is missing lines (original numbering jumps) —
 * frees of e.g. name/driver strings and closing braces are not visible. */
895 /* Called from main context */
896 static void sink_free(pa_object *o) {
897 pa_sink *s = PA_SINK(o);
900 pa_assert_ctl_context();
901 pa_assert(pa_sink_refcnt(s) == 0);
902 pa_assert(!PA_SINK_IS_LINKED(s->state));
904 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
906 pa_sink_volume_change_flush(s);
908 if (s->monitor_source) {
909 pa_source_unref(s->monitor_source);
910 s->monitor_source = NULL;
913 pa_idxset_free(s->inputs, NULL);
914 pa_hashmap_free(s->thread_info.inputs);
916 if (s->silence.memblock)
917 pa_memblock_unref(s->silence.memblock);
923 pa_proplist_free(s->proplist);
926 pa_hashmap_free(s->ports);
928 #ifdef TIZEN_PCM_DUMP
929 /* close file for dump pcm */
930 if (s->pcm_dump_fp) {
931 fclose(s->pcm_dump_fp);
932 pa_log_info("%s closed", s->dump_path);
933 pa_xfree(s->dump_path);
934 s->pcm_dump_fp = NULL;
940 /* Called from main context, and not while the IO thread is active, please */
941 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
942 pa_sink_assert_ref(s);
943 pa_assert_ctl_context();
947 if (s->monitor_source)
948 pa_source_set_asyncmsgq(s->monitor_source, q);
/* Update a restricted set of sink flags (only LATENCY/DYNAMIC_LATENCY may
 * change), log the transitions, notify subscribers and hooks, mirror the
 * change onto the monitor source, and recurse into filter sinks stacked on
 * top of this one.
 * NOTE(review): this extract is missing lines (original numbering jumps) —
 * the `idx` declaration, the early `return` when flags are unchanged, and
 * closing braces are not visible. */
951 /* Called from main context, and not while the IO thread is active, please */
952 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
953 pa_sink_flags_t old_flags;
954 pa_sink_input *input;
957 pa_sink_assert_ref(s);
958 pa_assert_ctl_context();
960 /* For now, allow only a minimal set of flags to be changed. */
961 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
963 old_flags = s->flags;
964 s->flags = (s->flags & ~mask) | (value & mask);
966 if (s->flags == old_flags)
969 if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
970 pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
972 if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
973 pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
974 s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
976 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
977 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
979 if (s->monitor_source)
980 pa_source_update_flags(s->monitor_source,
981 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
982 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
983 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
984 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
/* Propagate the flag change to filter sinks layered on this sink. */
986 PA_IDXSET_FOREACH(input, s->inputs, idx) {
987 if (input->origin_sink)
988 pa_sink_update_flags(input->origin_sink, mask, value);
992 /* Called from IO context, or before _put() from main context */
993 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
994 pa_sink_assert_ref(s);
995 pa_sink_assert_io_context(s);
997 s->thread_info.rtpoll = p;
999 if (s->monitor_source)
1000 pa_source_set_rtpoll(s->monitor_source, p);
1003 /* Called from main context */
1004 int pa_sink_update_status(pa_sink*s) {
1005 pa_sink_assert_ref(s);
1006 pa_assert_ctl_context();
1007 pa_assert(PA_SINK_IS_LINKED(s->state));
1009 if (s->state == PA_SINK_SUSPENDED)
1012 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
1015 /* Called from main context */
1016 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
1017 pa_suspend_cause_t merged_cause;
1019 pa_sink_assert_ref(s);
1020 pa_assert_ctl_context();
1021 pa_assert(PA_SINK_IS_LINKED(s->state));
1022 pa_assert(cause != 0);
1025 merged_cause = s->suspend_cause | cause;
1027 merged_cause = s->suspend_cause & ~cause;
1030 return sink_set_state(s, PA_SINK_SUSPENDED, merged_cause);
1032 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
1035 /* Called from main context */
1036 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
1037 pa_sink_input *i, *n;
1040 pa_sink_assert_ref(s);
1041 pa_assert_ctl_context();
1042 pa_assert(PA_SINK_IS_LINKED(s->state));
1047 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
1048 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
1050 pa_sink_input_ref(i);
1052 if (pa_sink_input_start_move(i) >= 0)
1053 pa_queue_push(q, i);
1055 pa_sink_input_unref(i);
1061 /* Called from main context */
1062 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
1065 pa_sink_assert_ref(s);
1066 pa_assert_ctl_context();
1067 pa_assert(PA_SINK_IS_LINKED(s->state));
1070 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1071 if (PA_SINK_INPUT_IS_LINKED(i->state)) {
1072 if (pa_sink_input_finish_move(i, s, save) < 0)
1073 pa_sink_input_fail_move(i);
1076 pa_sink_input_unref(i);
1079 pa_queue_free(q, NULL);
1082 /* Called from main context */
/* Abort a batch move: every input still queued has its pending move
 * failed (which detaches/kills it per the input's move semantics), its
 * queue reference dropped, and finally the queue is freed. */
1083 void pa_sink_move_all_fail(pa_queue *q) {
1086 pa_assert_ctl_context();
1089 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1090 pa_sink_input_fail_move(i);
1091 pa_sink_input_unref(i);
1094 pa_queue_free(q, NULL);
1097 /* Called from IO thread context */
/* Given how many bytes are still left to play in the sink's buffer,
 * scan all inputs for underruns and return the number of bytes that are
 * actually "safe" ahead in the playback buffer, i.e. left_to_play minus
 * the longest underrun found. Filter sinks are handled recursively in
 * their own sample-spec domain, converting byte counts across domains
 * with pa_convert_size(). */
1098 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
1103 pa_sink_assert_ref(s);
1104 pa_sink_assert_io_context(s);
1106 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1107 size_t uf = i->thread_info.underrun_for_sink;
1109 /* Propagate down the filter tree */
1110 if (i->origin_sink) {
1111 size_t filter_result, left_to_play_origin;
1113 /* The recursive call works in the origin sink domain ... */
1114 left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
1116 /* .. and returns the time to sleep before waking up. We need the
1117 * underrun duration for comparisons, so we undo the subtraction on
1118 * the return value... */
1119 filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
1121 /* ... and convert it back to the master sink domain */
1122 filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
1124 /* Remember the longest underrun so far */
1125 if (filter_result > result)
1126 result = filter_result;
1130 /* No underrun here, move on */
1132 } else if (uf >= left_to_play) {
1133 /* The sink has possibly consumed all the data the sink input provided */
1134 pa_sink_input_process_underrun(i);
1135 } else if (uf > result) {
1136 /* Remember the longest underrun so far */
1142 pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
1143 (long) result, (long) left_to_play - result);
1144 return left_to_play - result;
1147 /* Called from IO thread context */
/* Execute a rewind of nbytes in the sink: reset the pending rewind
 * request, roll back any deferred volume changes, rewind every attached
 * input, and finally the monitor source (so recorded data stays in sync
 * with what is actually played). The Tizen PCM-dump file offset is moved
 * back as well when that build option is enabled. */
1148 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1152 pa_sink_assert_ref(s);
1153 pa_sink_assert_io_context(s);
1154 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1156 /* If nobody requested this and this is actually no real rewind
1157 * then we can short cut this. Please note that this means that
1158 * not all rewind requests triggered upstream will always be
1159 * translated in actual requests! */
1160 if (!s->thread_info.rewind_requested && nbytes <= 0)
1163 s->thread_info.rewind_nbytes = 0;
1164 s->thread_info.rewind_requested = false;
1167 pa_log_debug("Processing rewind...");
1168 if (s->flags & PA_SINK_DEFERRED_VOLUME)
1169 pa_sink_volume_change_rewind(s, nbytes);
1170 #ifdef TIZEN_PCM_DUMP
/* Move the dump file position back by the rewound amount so the dump
 * matches the audible stream. */
1173 fseeko(s->pcm_dump_fp, (off_t)nbytes * (-1), SEEK_CUR);
1177 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1178 pa_sink_input_assert_ref(i);
1179 pa_sink_input_process_rewind(i, nbytes);
1183 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1184 pa_source_process_rewind(s->monitor_source, nbytes);
1188 /* Called from IO thread context */
/* Collect up to maxinfo mix entries from the sink's inputs: peek a chunk
 * (and its volume) from each input, shrink *length to the shortest chunk
 * seen (the common mixable length), and skip chunks that are pure
 * silence. Each stored entry takes a reference on its input via
 * info->userdata; inputs_drop() releases those references later.
 * Returns the number of entries filled (return statement elided here). */
1189 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1193 size_t mixlength = *length;
1195 pa_sink_assert_ref(s);
1196 pa_sink_assert_io_context(s);
1199 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1200 pa_sink_input_assert_ref(i);
1202 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
1204 if (mixlength == 0 || info->chunk.length < mixlength)
1205 mixlength = info->chunk.length;
1207 if (pa_memblock_is_silence(info->chunk.memblock)) {
1208 pa_memblock_unref(info->chunk.memblock);
1212 info->userdata = pa_sink_input_ref(i);
1214 pa_assert(info->chunk.memblock);
1215 pa_assert(info->chunk.length > 0);
1223 *length = mixlength;
1228 /* Called from IO thread context */
/* After mixing, consume result->length bytes from every input that
 * contributed to the mix, feed direct-output streams their per-input
 * (volume-adjusted) copy of the data, drop the references taken by
 * fill_mix_info(), and finally post the mixed result to the monitor
 * source. Entries in info[] that no longer correspond to a live input
 * are unreffed at the end. */
1229 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1233 unsigned n_unreffed = 0;
1235 pa_sink_assert_ref(s);
1236 pa_sink_assert_io_context(s);
1238 pa_assert(result->memblock);
1239 pa_assert(result->length > 0);
1241 /* We optimize for the case where the order of the inputs has not changed */
1243 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1245 pa_mix_info* m = NULL;
1247 pa_sink_input_assert_ref(i);
1249 /* Let's try to find the matching entry info the pa_mix_info array */
1250 for (j = 0; j < n; j ++) {
1252 if (info[p].userdata == i) {
1262 /* Drop read data */
1263 pa_sink_input_drop(i, result->length);
1265 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
1267 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1268 void *ostate = NULL;
1269 pa_source_output *o;
/* If we have the input's own chunk, apply its volume before
 * handing it to the direct outputs; otherwise (elided branch)
 * the plain chunk is used. */
1272 if (m && m->chunk.memblock) {
1274 pa_memblock_ref(c.memblock);
1275 pa_assert(result->length <= c.length);
1276 c.length = result->length;
1278 pa_memchunk_make_writable(&c, 0);
1279 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
1282 pa_memblock_ref(c.memblock);
1283 pa_assert(result->length <= c.length);
1284 c.length = result->length;
1287 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1288 pa_source_output_assert_ref(o);
1289 pa_assert(o->direct_on_input == i);
1290 pa_source_post_direct(s->monitor_source, o, &c);
1293 pa_memblock_unref(c.memblock);
/* Release this entry's chunk and input reference now that the
 * data has been consumed. */
1298 if (m->chunk.memblock) {
1299 pa_memblock_unref(m->chunk.memblock);
1300 pa_memchunk_reset(&m->chunk);
1303 pa_sink_input_unref(m->userdata);
1310 /* Now drop references to entries that are included in the
1311 * pa_mix_info array but don't exist anymore */
1313 if (n_unreffed < n) {
1314 for (; n > 0; info++, n--) {
1316 pa_sink_input_unref(info->userdata);
1317 if (info->chunk.memblock)
1318 pa_memblock_unref(info->chunk.memblock);
1322 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1323 pa_source_post(s->monitor_source, result);
1326 /* Called from IO thread context */
/* Render up to `length` bytes of mixed audio from all inputs into
 * *result. A suspended sink yields silence. Three mixing cases:
 *   n == 0  -> return the sink's cached silence block;
 *   n == 1  -> reuse the single input's chunk directly, applying the
 *              combined soft volume (or substituting silence if muted);
 *   n  > 1  -> allocate a fresh block and pa_mix() all inputs into it.
 * Afterwards inputs_drop() consumes the rendered bytes from each input
 * and forwards the result to the monitor source. */
1327 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1328 pa_mix_info info[MAX_MIX_CHANNELS];
1330 size_t block_size_max;
1332 pa_sink_assert_ref(s);
1333 pa_sink_assert_io_context(s);
1334 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1335 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1338 pa_assert(!s->thread_info.rewind_requested);
1339 pa_assert(s->thread_info.rewind_nbytes == 0);
1341 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1342 result->memblock = pa_memblock_ref(s->silence.memblock);
1343 result->index = s->silence.index;
1344 result->length = PA_MIN(s->silence.length, length);
/* A zero length request means "pick a sensible default": one page,
 * rounded down to a whole frame. Clamp to the mempool block size. */
1351 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1353 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1354 if (length > block_size_max)
1355 length = pa_frame_align(block_size_max, &s->sample_spec);
1357 pa_assert(length > 0);
1359 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1363 *result = s->silence;
1364 pa_memblock_ref(result->memblock);
1366 if (result->length > length)
1367 result->length = length;
1369 } else if (n == 1) {
1372 *result = info[0].chunk;
1373 pa_memblock_ref(result->memblock);
1375 if (result->length > length)
1376 result->length = length;
1378 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1380 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1381 pa_memblock_unref(result->memblock);
1382 pa_silence_memchunk_get(&s->core->silence_cache,
1387 } else if (!pa_cvolume_is_norm(&volume)) {
1388 pa_memchunk_make_writable(result, 0);
1389 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* Multi-input case: mix everything into a freshly allocated block. */
1393 result->memblock = pa_memblock_new(s->core->mempool, length);
1395 ptr = pa_memblock_acquire(result->memblock);
1396 result->length = pa_mix(info, n,
1399 &s->thread_info.soft_volume,
1400 s->thread_info.soft_muted);
1401 pa_memblock_release(result->memblock);
1406 inputs_drop(s, info, n, result);
1408 #ifdef TIZEN_PCM_DUMP
1409 pa_sink_write_pcm_dump(s, result);
1414 /* Called from IO thread context */
/* Like pa_sink_render(), but render into a caller-provided target chunk
 * instead of allocating one. target->length may be shortened if fewer
 * bytes were available. A suspended sink fills the target with silence.
 * Same three cases as pa_sink_render(): silence for no inputs, a
 * volume-scaled copy for one input, pa_mix() for several. */
1415 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1416 pa_mix_info info[MAX_MIX_CHANNELS];
1418 size_t length, block_size_max;
1420 pa_sink_assert_ref(s);
1421 pa_sink_assert_io_context(s);
1422 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1424 pa_assert(target->memblock);
1425 pa_assert(target->length > 0);
1426 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1428 pa_assert(!s->thread_info.rewind_requested);
1429 pa_assert(s->thread_info.rewind_nbytes == 0);
1431 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1432 pa_silence_memchunk(target, &s->sample_spec);
1438 length = target->length;
1439 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1440 if (length > block_size_max)
1441 length = pa_frame_align(block_size_max, &s->sample_spec);
1443 pa_assert(length > 0);
1445 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1448 if (target->length > length)
1449 target->length = length;
1451 pa_silence_memchunk(target, &s->sample_spec);
1452 } else if (n == 1) {
1455 if (target->length > length)
1456 target->length = length;
1458 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1460 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1461 pa_silence_memchunk(target, &s->sample_spec);
/* Copy the single input's chunk into the target, scaling it by the
 * combined volume first if that volume is not unity. */
1465 vchunk = info[0].chunk;
1466 pa_memblock_ref(vchunk.memblock);
1468 if (vchunk.length > length)
1469 vchunk.length = length;
1471 if (!pa_cvolume_is_norm(&volume)) {
1472 pa_memchunk_make_writable(&vchunk, 0);
1473 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1476 pa_memchunk_memcpy(target, &vchunk);
1477 pa_memblock_unref(vchunk.memblock);
/* Multi-input case: mix directly into the target's memblock. */
1483 ptr = pa_memblock_acquire(target->memblock);
1485 target->length = pa_mix(info, n,
1486 (uint8_t*) ptr + target->index, length,
1488 &s->thread_info.soft_volume,
1489 s->thread_info.soft_muted);
1491 pa_memblock_release(target->memblock);
1494 inputs_drop(s, info, n, target);
1496 #ifdef TIZEN_PCM_DUMP
1497 pa_sink_write_pcm_dump(s, target);
1502 /* Called from IO thread context */
/* Fill the *entire* target chunk, calling pa_sink_render_into()
 * repeatedly on the remaining tail until target->length bytes have been
 * produced (the loop and chunk bookkeeping are elided in this excerpt).
 * A suspended sink short-circuits to silence. */
1503 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1507 pa_sink_assert_ref(s);
1508 pa_sink_assert_io_context(s);
1509 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1511 pa_assert(target->memblock);
1512 pa_assert(target->length > 0);
1513 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1515 pa_assert(!s->thread_info.rewind_requested);
1516 pa_assert(s->thread_info.rewind_nbytes == 0);
1518 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1519 pa_silence_memchunk(target, &s->sample_spec);
1532 pa_sink_render_into(s, &chunk);
1541 /* Called from IO thread context */
/* Render exactly `length` bytes into *result: first do a normal render,
 * then, if it produced fewer bytes than requested, make the block
 * writable at the full size and fill the remainder in place with
 * pa_sink_render_into_full(). */
1542 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1543 pa_sink_assert_ref(s);
1544 pa_sink_assert_io_context(s);
1545 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1546 pa_assert(length > 0);
1547 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1550 pa_assert(!s->thread_info.rewind_requested);
1551 pa_assert(s->thread_info.rewind_nbytes == 0);
1555 pa_sink_render(s, length, result);
1557 if (result->length < length) {
/* Grow/copy the block so we can append, then render the missing
 * tail directly after the already-rendered data. */
1560 pa_memchunk_make_writable(result, length);
1562 chunk.memblock = result->memblock;
1563 chunk.index = result->index + result->length;
1564 chunk.length = length - result->length;
1566 pa_sink_render_into_full(s, &chunk);
1568 result->length = length;
1574 /* Called from main thread */
/* Try to switch the sink to a new sample spec (rate/format), as needed
 * for passthrough streams or to avoid resampling. Bails out early when:
 * the spec already matches, the sink has no reconfigure() callback, a
 * rate switch would be pointless, the sink (or its monitor source) is
 * RUNNING, or the requested spec is invalid. Otherwise it picks a
 * desired spec (preferring the stream's own rate/format where allowed,
 * falling back to whichever of default/alternate rate resamples more
 * cheaply), suspends the sink if needed, calls the implementation's
 * reconfigure(), updates the monitor source and corked inputs'
 * resamplers, and resumes. */
1575 void pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
1576 pa_sample_spec desired_spec;
1577 uint32_t default_rate = s->default_sample_rate;
1578 uint32_t alternate_rate = s->alternate_sample_rate;
1581 bool default_rate_is_usable = false;
1582 bool alternate_rate_is_usable = false;
1583 bool avoid_resampling = s->avoid_resampling;
1585 if (pa_sample_spec_equal(spec, &s->sample_spec))
1588 if (!s->reconfigure)
1592 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1593 pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
1598 if (PA_SINK_IS_RUNNING(s->state)) {
1599 pa_log_info("Cannot update sample spec, SINK_IS_RUNNING, will keep using %s and %u Hz",
1600 pa_sample_format_to_string(s->sample_spec.format), s->sample_spec.rate);
1604 if (s->monitor_source) {
1605 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1606 pa_log_info("Cannot update sample spec, monitor source is RUNNING");
1611 if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
1614 desired_spec = s->sample_spec;
/* NOTE(review): the conditionals distinguishing the passthrough /
 * avoid-resampling branches below are partly elided here. */
1617 if (!avoid_resampling) {
1618 default_rate = alternate_rate = s->selected_sample_rate;
1619 desired_spec.format = s->selected_sample_format;
1623 /* We have to try to use the sink input format and rate */
1624 desired_spec.format = spec->format;
1625 desired_spec.rate = spec->rate;
1627 } else if (avoid_resampling) {
1628 /* We just try to set the sink input's sample rate if it's not too low */
1629 if (spec->rate >= default_rate || spec->rate >= alternate_rate)
1630 desired_spec.rate = spec->rate;
1631 desired_spec.format = spec->format;
1633 } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
1634 /* We can directly try to use this rate */
1635 desired_spec.rate = spec->rate;
1639 if (desired_spec.rate != spec->rate) {
1640 /* See if we can pick a rate that results in less resampling effort */
1641 if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
1642 default_rate_is_usable = true;
1643 if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
1644 default_rate_is_usable = true;
1645 if (alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
1646 alternate_rate_is_usable = true;
1647 if (alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
1648 alternate_rate_is_usable = true;
1650 if (alternate_rate_is_usable && !default_rate_is_usable)
1651 desired_spec.rate = alternate_rate;
1653 desired_spec.rate = default_rate;
1656 if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_sink_is_passthrough(s))
1659 pa_log_info("desired spec is same as sink->sample_spec");
1666 if (!passthrough && pa_sink_used_by(s) > 0)
1669 pa_log_debug("Suspending sink %s due to changing format, desired format = %s rate = %u",
1670 s->name, pa_sample_format_to_string(desired_spec.format), desired_spec.rate);
1671 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1673 s->reconfigure(s, &desired_spec, passthrough);
1675 /* update monitor source as well */
1676 if (s->monitor_source && !passthrough)
1677 pa_source_reconfigure(s->monitor_source, &s->sample_spec, false);
1678 pa_log_info("Reconfigured successfully");
1680 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1681 if (i->state == PA_SINK_INPUT_CORKED)
1682 pa_sink_input_update_resampler(i);
1685 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1688 /* Called from main thread */
/* Query the current sink latency by sending a synchronous
 * GET_LATENCY message to the IO thread, then apply the (possibly
 * negative) per-port latency offset. Sinks that are suspended or lack
 * PA_SINK_LATENCY take early-return paths (elided here). The offset is
 * only added when the sum cannot go negative, since the return type is
 * unsigned. */
1689 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1692 pa_sink_assert_ref(s);
1693 pa_assert_ctl_context();
1694 pa_assert(PA_SINK_IS_LINKED(s->state));
1696 /* The returned value is supposed to be in the time domain of the sound card! */
1698 if (s->state == PA_SINK_SUSPENDED)
1701 if (!(s->flags & PA_SINK_LATENCY))
1704 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1706 /* the return value is unsigned, so check that the offset can be added to usec without
1708 if (-s->port_latency_offset <= usec)
1709 usec += s->port_latency_offset;
1713 return (pa_usec_t)usec;
1716 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): calls process_msg()
 * directly instead of round-tripping through the asyncmsgq. Returns a
 * signed value; when allow_negative is false, negative results are
 * clamped (the clamp-to-zero line is elided in this excerpt). */
1717 int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
1721 pa_sink_assert_ref(s);
1722 pa_sink_assert_io_context(s);
1723 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1725 /* The returned value is supposed to be in the time domain of the sound card! */
1727 if (s->thread_info.state == PA_SINK_SUSPENDED)
1730 if (!(s->flags & PA_SINK_LATENCY))
1733 o = PA_MSGOBJECT(s);
1735 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1737 o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1739 /* If allow_negative is false, the call should only return positive values, */
1740 usec += s->thread_info.port_latency_offset;
1741 if (!allow_negative && usec < 0)
1747 /* Called from the main thread (and also from the IO thread while the main
1748 * thread is waiting).
1750 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1751 * set. Instead, flat volume mode is detected by checking whether the root sink
1752 * has the flag set. */
/* Returns true when the (root of the) sink's filter tree runs in flat
 * volume mode. */
1753 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1754 pa_sink_assert_ref(s);
1756 s = pa_sink_get_master(s);
1759 return (s->flags & PA_SINK_FLAT_VOLUME);
1764 /* Called from the main thread (and also from the IO thread while the main
1765 * thread is waiting). */
/* Walk up the chain of volume-sharing filter sinks and return the root
 * (master) sink. A filter whose input_to_master is unset mid-walk takes
 * an elided bail-out path. */
1766 pa_sink *pa_sink_get_master(pa_sink *s) {
1767 pa_sink_assert_ref(s);
1769 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1770 if (PA_UNLIKELY(!s->input_to_master))
1773 s = s->input_to_master->sink;
1779 /* Called from main context */
/* A sink is a filter sink iff it is attached to a master via an
 * input_to_master sink input. */
1780 bool pa_sink_is_filter(pa_sink *s) {
1781 pa_sink_assert_ref(s);
1783 return (s->input_to_master != NULL);
1786 /* Called from main context */
/* Returns true when the sink currently carries a passthrough stream:
 * by definition at most one input can be connected then, so only the
 * single-input case needs checking. */
1787 bool pa_sink_is_passthrough(pa_sink *s) {
1788 pa_sink_input *alt_i;
1791 pa_sink_assert_ref(s);
1793 /* one and only one PASSTHROUGH input can possibly be connected */
1794 if (pa_idxset_size(s->inputs) == 1) {
1795 alt_i = pa_idxset_first(s->inputs, &idx);
1797 if (pa_sink_input_is_passthrough(alt_i))
1804 /* Called from main context */
/* Put the PA core objects into passthrough mode: suspend the monitor
 * source (monitoring compressed data is pointless), remember the current
 * volume for later restoration, and force the volume to (at most) NORM
 * so the passthrough bitstream is not scaled. */
1805 void pa_sink_enter_passthrough(pa_sink *s) {
1808 /* The sink implementation is reconfigured for passthrough in
1809 * pa_sink_reconfigure(). This function sets the PA core objects to
1810 * passthrough mode. */
1812 /* disable the monitor in passthrough mode */
1813 if (s->monitor_source) {
1814 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1815 pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1818 /* set the volume to NORM */
1819 s->saved_volume = *pa_sink_get_volume(s, true);
1820 s->saved_save_volume = s->save_volume;
1822 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1823 pa_sink_set_volume(s, &volume, true, false);
1825 pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
1828 /* Called from main context */
/* Undo pa_sink_enter_passthrough(): resume the monitor source and
 * restore the volume (and its save flag) captured before entering
 * passthrough, then clear the saved state. */
1829 void pa_sink_leave_passthrough(pa_sink *s) {
1830 /* Unsuspend monitor */
1831 if (s->monitor_source) {
1832 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1833 pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1836 /* Restore sink volume to what it was before we entered passthrough mode */
1837 pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
1839 pa_cvolume_init(&s->saved_volume);
1840 s->saved_save_volume = false;
1844 /* Called from main context. */
/* Recompute one input's reference ratio (its volume relative to the
 * sink's reference volume), channel by channel. Channels where the
 * remapped sink volume is muted, or where the existing ratio already
 * reproduces the input volume exactly, are left untouched to avoid
 * accumulating rounding error. */
1845 static void compute_reference_ratio(pa_sink_input *i) {
1847 pa_cvolume remapped;
1851 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1854 * Calculates the reference ratio from the sink's reference
1855 * volume. This basically calculates:
1857 * i->reference_ratio = i->volume / i->sink->reference_volume
1860 remapped = i->sink->reference_volume;
1861 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1863 ratio = i->reference_ratio;
1865 for (c = 0; c < i->sample_spec.channels; c++) {
1867 /* We don't update when the sink volume is 0 anyway */
1868 if (remapped.values[c] <= PA_VOLUME_MUTED)
1871 /* Don't update the reference ratio unless necessary */
1872 if (pa_sw_volume_multiply(
1874 remapped.values[c]) == i->volume.values[c])
1877 ratio.values[c] = pa_sw_volume_divide(
1878 i->volume.values[c],
1879 remapped.values[c]);
1882 pa_sink_input_set_reference_ratio(i, &ratio);
1885 /* Called from main context. Only called for the root sink in volume sharing
1886 * cases, except for internal recursive calls. */
/* Recompute the reference ratio of every input of this sink, recursing
 * into linked volume-sharing filter sinks so the whole tree is updated. */
1887 static void compute_reference_ratios(pa_sink *s) {
1891 pa_sink_assert_ref(s);
1892 pa_assert_ctl_context();
1893 pa_assert(PA_SINK_IS_LINKED(s->state));
1894 pa_assert(pa_sink_flat_volume_enabled(s));
1896 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1897 compute_reference_ratio(i);
1899 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1900 && PA_SINK_IS_LINKED(i->origin_sink->state))
1901 compute_reference_ratios(i->origin_sink);
1905 /* Called from main context. Only called for the root sink in volume sharing
1906 * cases, except for internal recursive calls. */
/* Recompute each input's real ratio (input volume relative to the
 * sink's real volume) and from it the soft volume actually applied in
 * software. Volume-sharing filter inputs are pinned to a 0 dB real
 * ratio and recursed into instead. */
1907 static void compute_real_ratios(pa_sink *s) {
1911 pa_sink_assert_ref(s);
1912 pa_assert_ctl_context();
1913 pa_assert(PA_SINK_IS_LINKED(s->state));
1914 pa_assert(pa_sink_flat_volume_enabled(s));
1916 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1918 pa_cvolume remapped;
1920 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1921 /* The origin sink uses volume sharing, so this input's real ratio
1922 * is handled as a special case - the real ratio must be 0 dB, and
1923 * as a result i->soft_volume must equal i->volume_factor. */
1924 pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1925 i->soft_volume = i->volume_factor;
1927 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1928 compute_real_ratios(i->origin_sink);
1934 * This basically calculates:
1936 * i->real_ratio := i->volume / s->real_volume
1937 * i->soft_volume := i->real_ratio * i->volume_factor
1940 remapped = s->real_volume;
1941 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1943 i->real_ratio.channels = i->sample_spec.channels;
1944 i->soft_volume.channels = i->sample_spec.channels;
1946 for (c = 0; c < i->sample_spec.channels; c++) {
1948 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1949 /* We leave i->real_ratio untouched */
1950 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1954 /* Don't lose accuracy unless necessary */
1955 if (pa_sw_volume_multiply(
1956 i->real_ratio.values[c],
1957 remapped.values[c]) != i->volume.values[c])
1959 i->real_ratio.values[c] = pa_sw_volume_divide(
1960 i->volume.values[c],
1961 remapped.values[c]);
1963 i->soft_volume.values[c] = pa_sw_volume_multiply(
1964 i->real_ratio.values[c],
1965 i->volume_factor.values[c]);
1968 /* We don't copy the soft_volume to the thread_info data
1969 * here. That must be done by the caller */
/* Remap cvolume *v from channel map `from` to `to` while minimizing the
 * impact on other streams: if `template` is already a valid remapping of
 * v it is reused; if the maps differ and no template fits, v collapses
 * to an all-channel volume (the per-channel maximum). See the long
 * comment in the body for the rationale. */
1973 static pa_cvolume *cvolume_remap_minimal_impact(
1975 const pa_cvolume *template,
1976 const pa_channel_map *from,
1977 const pa_channel_map *to) {
1982 pa_assert(template);
1985 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1986 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1988 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1989 * mapping from sink input to sink volumes:
1991 * If template is a possible remapping from v it is used instead
1992 * of remapping anew.
1994 * If the channel maps don't match we set an all-channel volume on
1995 * the sink to ensure that changing a volume on one stream has no
1996 * effect that cannot be compensated for in another stream that
1997 * does not have the same channel map as the sink. */
1999 if (pa_channel_map_equal(from, to))
/* Round-trip check: if mapping the template back to `from` yields v,
 * the template is a faithful remapping and can be adopted as-is. */
2003 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
2008 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
2012 /* Called from main thread. Only called for the root sink in volume sharing
2013 * cases, except for internal recursive calls. */
/* Accumulate into *max_volume the per-channel maximum of all input
 * volumes in this sink's tree (remapped with minimal impact into
 * channel_map). Inputs that are themselves volume-sharing filter sinks
 * contribute their own inputs recursively rather than their own volume. */
2014 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
2018 pa_sink_assert_ref(s);
2019 pa_assert(max_volume);
2020 pa_assert(channel_map);
2021 pa_assert(pa_sink_flat_volume_enabled(s));
2023 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2024 pa_cvolume remapped;
2026 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2027 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2028 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
2030 /* Ignore this input. The origin sink uses volume sharing, so this
2031 * input's volume will be set to be equal to the root sink's real
2032 * volume. Obviously this input's current volume must not then
2033 * affect what the root sink's real volume will be. */
2037 remapped = i->volume;
2038 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
2039 pa_cvolume_merge(max_volume, max_volume, &remapped);
2043 /* Called from main thread. Only called for the root sink in volume sharing
2044 * cases, except for internal recursive calls. */
/* True when the sink (or any volume-sharing filter sink hanging off it)
 * has at least one "real" input, i.e. one that is not itself an empty
 * volume-sharing filter. */
2045 static bool has_inputs(pa_sink *s) {
2049 pa_sink_assert_ref(s);
2051 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2052 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
2059 /* Called from main thread. Only called for the root sink in volume sharing
2060 * cases, except for internal recursive calls. */
/* Set s->real_volume to new_volume (remapped into the sink's channel
 * map) and propagate it down the filter tree: each volume-sharing
 * filter input gets its volume pinned to the root's real volume (with a
 * refreshed reference ratio in flat-volume mode), and the filter sink
 * itself recurses. */
2061 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
2065 pa_sink_assert_ref(s);
2066 pa_assert(new_volume);
2067 pa_assert(channel_map);
2069 s->real_volume = *new_volume;
2070 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
2072 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2073 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2074 if (pa_sink_flat_volume_enabled(s)) {
2075 pa_cvolume new_input_volume;
2077 /* Follow the root sink's real volume. */
2078 new_input_volume = *new_volume;
2079 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
2080 pa_sink_input_set_volume_direct(i, &new_input_volume);
2081 compute_reference_ratio(i);
2084 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2085 update_real_volume(i->origin_sink, new_volume, channel_map);
2090 /* Called from main thread. Only called for the root sink in shared volume
/* Derive the root sink's real volume in flat-volume mode: the
 * per-channel maximum over all stream volumes (or the reference volume
 * if there are no streams), pushed down the tree via
 * update_real_volume() and followed by a real-ratio recomputation. */
2092 static void compute_real_volume(pa_sink *s) {
2093 pa_sink_assert_ref(s);
2094 pa_assert_ctl_context();
2095 pa_assert(PA_SINK_IS_LINKED(s->state));
2096 pa_assert(pa_sink_flat_volume_enabled(s));
2097 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2099 /* This determines the maximum volume of all streams and sets
2100 * s->real_volume accordingly. */
2102 if (!has_inputs(s)) {
2103 /* In the special case that we have no sink inputs we leave the
2104 * volume unmodified. */
2105 update_real_volume(s, &s->reference_volume, &s->channel_map);
2109 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
2111 /* First let's determine the new maximum volume of all inputs
2112 * connected to this sink */
2113 get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
2114 update_real_volume(s, &s->real_volume, &s->channel_map);
2116 /* Then, let's update the real ratios/soft volumes of all inputs
2117 * connected to this sink */
2118 compute_real_ratios(s);
2121 /* Called from main thread. Only called for the root sink in shared volume
2122 * cases, except for internal recursive calls. */
/* After a sink-initiated reference-volume change, push the new volume
 * down to every input: each input's volume becomes
 * reference_volume * reference_ratio (remapped into the input's channel
 * map). Volume-sharing filter inputs are recursed into instead, since
 * their own volume tracks the root's real volume elsewhere. */
2123 static void propagate_reference_volume(pa_sink *s) {
2127 pa_sink_assert_ref(s);
2128 pa_assert_ctl_context();
2129 pa_assert(PA_SINK_IS_LINKED(s->state));
2130 pa_assert(pa_sink_flat_volume_enabled(s));
2132 /* This is called whenever the sink volume changes that is not
2133 * caused by a sink input volume change. We need to fix up the
2134 * sink input volumes accordingly */
2136 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2137 pa_cvolume new_volume;
2139 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2140 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2141 propagate_reference_volume(i->origin_sink);
2143 /* Since the origin sink uses volume sharing, this input's volume
2144 * needs to be updated to match the root sink's real volume, but
2145 * that will be done later in update_real_volume(). */
2149 /* This basically calculates:
2151 * i->volume := s->reference_volume * i->reference_ratio */
2153 new_volume = s->reference_volume;
2154 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2155 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2156 pa_sink_input_set_volume_direct(i, &new_volume);
2160 /* Called from main thread. Only called for the root sink in volume sharing
2161 * cases, except for internal recursive calls. The return value indicates
2162 * whether any reference volume actually changed. */
/* Set the sink's reference volume to *v (remapped from channel_map into
 * the sink's own map), update the save flag, and recurse into linked
 * volume-sharing filter sinks. Unchanged root volumes short-circuit the
 * recursion except in the share-with-master case explained below. */
2163 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
2165 bool reference_volume_changed;
2169 pa_sink_assert_ref(s);
2170 pa_assert(PA_SINK_IS_LINKED(s->state));
2172 pa_assert(channel_map);
2173 pa_assert(pa_cvolume_valid(v));
2176 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
2178 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
2179 pa_sink_set_reference_volume_direct(s, &volume);
2181 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
2183 if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2184 /* If the root sink's volume doesn't change, then there can't be any
2185 * changes in the other sinks in the sink tree either.
2187 * It's probably theoretically possible that even if the root sink's
2188 * volume changes slightly, some filter sink doesn't change its volume
2189 * due to rounding errors. If that happens, we still want to propagate
2190 * the changed root sink volume to the sinks connected to the
2191 * intermediate sink that didn't change its volume. This theoretical
2192 * possibility is the reason why we have that !(s->flags &
2193 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2194 * notice even if we returned here false always if
2195 * reference_volume_changed is false. */
2198 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2199 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2200 && PA_SINK_IS_LINKED(i->origin_sink->state))
2201 update_reference_volume(i->origin_sink, v, channel_map, false);
2207 /* Called from main thread */
/* Set the sink volume. With a non-NULL volume: update the root sink's
 * reference volume and, in flat-volume mode, propagate it to the
 * streams and recompute the real volume. With volume == NULL (only
 * valid in flat-volume mode): re-synchronize the sink's reference/real
 * volumes from the current stream volumes instead. Finally the new
 * volume is applied through set_volume() (hardware) or soft_volume, and
 * the IO thread is notified synchronously. Rejected on passthrough
 * sinks unless the call resets to 0 dB. */
2208 void pa_sink_set_volume(
2210 const pa_cvolume *volume,
2214 pa_cvolume new_reference_volume;
2217 pa_sink_assert_ref(s);
2218 pa_assert_ctl_context();
2219 pa_assert(PA_SINK_IS_LINKED(s->state));
2220 pa_assert(!volume || pa_cvolume_valid(volume));
2221 pa_assert(volume || pa_sink_flat_volume_enabled(s));
2222 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2224 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2225 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2226 if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2227 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
2231 /* In case of volume sharing, the volume is set for the root sink first,
2232 * from which it's then propagated to the sharing sinks. */
2233 root_sink = pa_sink_get_master(s);
2235 if (PA_UNLIKELY(!root_sink))
2238 /* As a special exception we accept mono volumes on all sinks --
2239 * even on those with more complex channel maps */
2242 if (pa_cvolume_compatible(volume, &s->sample_spec))
2243 new_reference_volume = *volume;
/* Mono volume on a multi-channel sink: keep the current balance and
 * scale to the requested level. */
2245 new_reference_volume = s->reference_volume;
2246 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2249 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2251 if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2252 if (pa_sink_flat_volume_enabled(root_sink)) {
2253 /* OK, propagate this volume change back to the inputs */
2254 propagate_reference_volume(root_sink);
2256 /* And now recalculate the real volume */
2257 compute_real_volume(root_sink);
2259 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2263 /* If volume is NULL we synchronize the sink's real and
2264 * reference volumes with the stream volumes. */
2266 pa_assert(pa_sink_flat_volume_enabled(root_sink));
2268 /* Ok, let's determine the new real volume */
2269 compute_real_volume(root_sink);
2271 /* Let's 'push' the reference volume if necessary */
2272 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2273 /* If the sink and its root don't have the same number of channels, we need to remap */
2274 if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2275 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2276 update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2278 /* Now that the reference volume is updated, we can update the streams'
2279 * reference ratios. */
2280 compute_reference_ratios(root_sink);
2283 if (root_sink->set_volume) {
2284 /* If we have a function set_volume(), then we do not apply a
2285 * soft volume by default. However, set_volume() is free to
2286 * apply one to root_sink->soft_volume */
2288 pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
2289 if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2290 root_sink->set_volume(root_sink);
2293 /* If we have no function set_volume(), then the soft volume
2294 * becomes the real volume */
2295 root_sink->soft_volume = root_sink->real_volume;
2297 /* This tells the sink that soft volume and/or real volume changed */
2299 pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2302 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2303 * Only to be called by sink implementor */
/* Sets the software attenuation applied by the core mixer; NULL resets it to
 * norm. NOTE(review): the extraction is missing the NULL/else branching and
 * trailing guard lines around these statements. */
2304 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2306 pa_sink_assert_ref(s);
/* Volume-sharing slaves must never carry their own soft volume. */
2307 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* Deferred-volume sinks call this from the IO thread, others from the
 * control (main) thread. */
2309 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2310 pa_sink_assert_io_context(s);
2312 pa_assert_ctl_context();
2315 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2317 s->soft_volume = *volume;
/* When linked and not deferred, hand the new value to the IO thread. */
2319 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2320 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2322 s->thread_info.soft_volume = s->soft_volume;
2325 /* Called from the main thread. Only called for the root sink in volume sharing
2326 * cases, except for internal recursive calls. */
/* Reacts to an externally-caused hardware volume change: adopts the new real
 * volume as the reference volume and rebuilds stream volumes from the stored
 * ratios. NOTE(review): extraction is missing some lines (declarations of
 * i/idx, a return, braces). */
2327 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2331 pa_sink_assert_ref(s);
2332 pa_assert(old_real_volume);
2333 pa_assert_ctl_context();
2334 pa_assert(PA_SINK_IS_LINKED(s->state));
2336 /* This is called when the hardware's real volume changes due to
2337 * some external event. We copy the real volume into our
2338 * reference volume and then rebuild the stream volumes based on
2339 * i->real_ratio which should stay fixed. */
2341 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* Nothing changed — nothing to propagate. */
2342 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2345 /* 1. Make the real volume the reference volume */
2346 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2349 if (pa_sink_flat_volume_enabled(s)) {
2351 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2352 pa_cvolume new_volume;
2354 /* 2. Since the sink's reference and real volumes are equal
2355 * now our ratios should be too. */
2356 pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2358 /* 3. Recalculate the new stream reference volume based on the
2359 * reference ratio and the sink's reference volume.
2361 * This basically calculates:
2363 * i->volume = s->reference_volume * i->reference_ratio
2365 * This is identical to propagate_reference_volume() */
2366 new_volume = s->reference_volume;
2367 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2368 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2369 pa_sink_input_set_volume_direct(i, &new_volume);
/* Recurse into filter sinks that share volume with this one. */
2371 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2372 && PA_SINK_IS_LINKED(i->origin_sink->state))
2373 propagate_real_volume(i->origin_sink, old_real_volume);
2377 /* Something got changed in the hardware. It probably makes sense
2378 * to save changed hw settings given that hw volume changes not
2379 * triggered by PA are almost certainly done by the user. */
2380 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2381 s->save_volume = true;
2384 /* Called from io thread */
2385 void pa_sink_update_volume_and_mute(pa_sink *s) {
2387 pa_sink_assert_io_context(s);
2389 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2392 /* Called from main thread */
/* Returns the sink's reference volume, optionally re-reading the hardware
 * volume first (when refresh_volume is set or force_refresh is true).
 * NOTE(review): the extraction is missing the else branch around the
 * GET_VOLUME message send and some braces. */
2393 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
2394 pa_sink_assert_ref(s);
2395 pa_assert_ctl_context();
2396 pa_assert(PA_SINK_IS_LINKED(s->state));
2398 if (s->refresh_volume || force_refresh) {
2399 struct pa_cvolume old_real_volume;
2401 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* Remember the old value so we only propagate actual changes. */
2403 old_real_volume = s->real_volume;
/* Non-deferred sinks are queried directly; deferred ones via the IO
 * thread message below. */
2405 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2408 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2410 update_real_volume(s, &s->real_volume, &s->channel_map);
2411 propagate_real_volume(s, &old_real_volume);
2414 return &s->reference_volume;
2417 /* Called from main thread. In volume sharing cases, only the root sink may
/* Notification entry point for sink implementors: the hardware volume
 * changed behind our back; adopt it and propagate to streams. */
2419 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2420 pa_cvolume old_real_volume;
2422 pa_sink_assert_ref(s);
2423 pa_assert_ctl_context();
2424 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Must not be called on volume-sharing slaves — only the root owns volume. */
2425 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2427 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2429 old_real_volume = s->real_volume;
2430 update_real_volume(s, new_real_volume, &s->channel_map);
2431 propagate_real_volume(s, &old_real_volume);
2434 /* Called from main thread */
/* Sets the sink's mute flag, invokes the implementor callback, notifies the
 * IO thread, and fires subscription/hook events. NOTE(review): extraction is
 * missing the `bool old_muted` declaration, an early return, the
 * `s->muted = mute` assignment line, and the set_mute(s) call line. */
2435 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2438 pa_sink_assert_ref(s);
2439 pa_assert_ctl_context();
2441 old_muted = s->muted;
/* No state change: only the save flag may need to be absorbed. */
2443 if (mute == old_muted) {
2444 s->save_muted |= save;
2449 s->save_muted = save;
/* Guard flag so pa_sink_mute_changed() can ignore re-entrant feedback
 * triggered by the implementor's set_mute() callback. */
2451 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
2452 s->set_mute_in_progress = true;
2454 s->set_mute_in_progress = false;
2457 if (!PA_SINK_IS_LINKED(s->state))
2460 pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
2461 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2462 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2463 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2466 /* Called from main thread */
/* Returns the sink's mute state, optionally re-reading it from the hardware
 * first. NOTE(review): extraction is missing the `bool mute` declaration,
 * the else keyword between the two query paths, and the return statement. */
2467 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2469 pa_sink_assert_ref(s);
2470 pa_assert_ctl_context();
2471 pa_assert(PA_SINK_IS_LINKED(s->state));
2473 if ((s->refresh_muted || force_refresh) && s->get_mute) {
/* Deferred-volume sinks must be queried in the IO thread via message;
 * others can call get_mute() directly here. */
2476 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2477 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2478 pa_sink_mute_changed(s, mute);
2480 if (s->get_mute(s, &mute) >= 0)
2481 pa_sink_mute_changed(s, mute);
2488 /* Called from main thread */
/* Notification from the implementor that the hardware mute state changed;
 * forwards to pa_sink_set_mute() with save=true unless this is feedback from
 * an in-progress pa_sink_set_mute() call. */
2489 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2490 pa_sink_assert_ref(s);
2491 pa_assert_ctl_context();
2492 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Ignore re-entrant feedback caused by our own set_mute() callback. */
2494 if (s->set_mute_in_progress)
2497 /* pa_sink_set_mute() does this same check, so this may appear redundant,
2498 * but we must have this here also, because the save parameter of
2499 * pa_sink_set_mute() would otherwise have unintended side effects (saving
2500 * the mute state when it shouldn't be saved). */
2501 if (new_muted == s->muted)
2504 pa_sink_set_mute(s, new_muted, true);
2507 /* Called from main thread */
/* Merges property list `p` into the sink's proplist according to `mode` and,
 * when linked, fires the proplist-changed hook and a change event.
 * NOTE(review): two signatures (void and bool) are visible here — presumably
 * an #ifdef __TIZEN__ pair whose preprocessor lines were lost in extraction;
 * verify against the full file. */
2509 void pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2511 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2513 pa_sink_assert_ref(s);
2514 pa_assert_ctl_context();
2517 pa_proplist_update(s->proplist, mode, p);
2519 if (PA_SINK_IS_LINKED(s->state)) {
2520 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2521 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2530 /* Called from main thread */
/* Tizen-specific helper: records the remote-access permission flag in the
 * sink's proplist. NOTE(review): extraction is missing the else keyword
 * between the success and failure paths and some braces. */
2531 void pa_sink_update_proplist_remote_access_permission(pa_sink *s, bool allowed) {
2532 pa_proplist* p = NULL;
2534 pa_sink_assert_ref(s);
2535 pa_assert_ctl_context();
2537 p = pa_proplist_new();
/* On success, merge the permission entry into the sink's proplist;
 * otherwise log the failure. The temporary proplist is freed either way. */
2539 if (pa_proplist_set_remote_access_permission(p, allowed) == 0)
2540 pa_sink_update_proplist(s, PA_UPDATE_REPLACE, p);
2542 pa_log_error("set remote access permission %d on proplist %p failed", allowed, p);
2544 pa_proplist_free(p);
2546 #endif /* __TIZEN__ */
2548 /* Called from main thread */
2549 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Sets (or, with NULL, clears) the human-readable device description,
 * renames the monitor source accordingly, and fires change notifications.
 * NOTE(review): extraction is missing the `const char *old` declaration,
 * early returns, the if/else keywords around set/unset, and xfree(n). */
2550 void pa_sink_set_description(pa_sink *s, const char *description) {
2552 pa_sink_assert_ref(s);
2553 pa_assert_ctl_context();
/* NULL description and no existing entry: nothing to do. */
2555 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2558 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
/* Unchanged description: nothing to do. */
2560 if (old && description && pa_streq(old, description))
2564 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2566 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
/* Keep the monitor source's description in sync with ours. */
2568 if (s->monitor_source) {
2571 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2572 pa_source_set_description(s->monitor_source, n);
2576 if (PA_SINK_IS_LINKED(s->state)) {
2577 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2578 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2582 /* Called from main thread */
2583 unsigned pa_sink_linked_by(pa_sink *s) {
2586 pa_sink_assert_ref(s);
2587 pa_assert_ctl_context();
2588 pa_assert(PA_SINK_IS_LINKED(s->state));
2590 ret = pa_idxset_size(s->inputs);
2592 /* We add in the number of streams connected to us here. Please
2593 * note the asymmetry to pa_sink_used_by()! */
2595 if (s->monitor_source)
2596 ret += pa_source_linked_by(s->monitor_source);
2601 /* Called from main thread */
2602 unsigned pa_sink_used_by(pa_sink *s) {
2605 pa_sink_assert_ref(s);
2606 pa_assert_ctl_context();
2607 pa_assert(PA_SINK_IS_LINKED(s->state));
2609 ret = pa_idxset_size(s->inputs);
2610 pa_assert(ret >= s->n_corked);
2612 /* Streams connected to our monitor source do not matter for
2613 * pa_sink_used_by()!.*/
2615 return ret - s->n_corked;
2618 /* Called from main thread */
/* Counts the streams (other than the given ones to ignore) that should keep
 * this sink from auto-suspending, including monitor-source clients.
 * NOTE(review): extraction is missing the ret/i/idx declarations, the early
 * `return 0`, several `continue` statements, `ret++`, and the final return. */
2619 unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
2624 pa_sink_assert_ref(s);
2625 pa_assert_ctl_context();
/* An unlinked sink cannot be kept busy by anything. */
2627 if (!PA_SINK_IS_LINKED(s->state))
2632 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2633 if (i == ignore_input)
2636 /* We do not assert here. It is perfectly valid for a sink input to
2637 * be in the INIT state (i.e. created, marked done but not yet put)
2638 * and we should not care if it's unlinked as it won't contribute
2639 * towards our busy status.
2641 if (!PA_SINK_INPUT_IS_LINKED(i->state))
2644 if (i->state == PA_SINK_INPUT_CORKED)
2647 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2653 if (s->monitor_source)
2654 ret += pa_source_check_suspend(s->monitor_source, ignore_output);
2659 const char *pa_sink_state_to_string(pa_sink_state_t state) {
2661 case PA_SINK_INIT: return "INIT";
2662 case PA_SINK_IDLE: return "IDLE";
2663 case PA_SINK_RUNNING: return "RUNNING";
2664 case PA_SINK_SUSPENDED: return "SUSPENDED";
2665 case PA_SINK_UNLINKED: return "UNLINKED";
2666 case PA_SINK_INVALID_STATE: return "INVALID_STATE";
2669 pa_assert_not_reached();
2672 /* Called from the IO thread */
/* Copies each input's soft volume into its thread_info mirror and requests a
 * rewind so the new attenuation takes effect. NOTE(review): extraction is
 * missing the i/state declarations, a `continue`, and braces. */
2673 static void sync_input_volumes_within_thread(pa_sink *s) {
2677 pa_sink_assert_ref(s);
2678 pa_sink_assert_io_context(s);
2680 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
/* Already in sync: skip this input. */
2681 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2684 i->thread_info.soft_volume = i->soft_volume;
2685 pa_sink_input_request_rewind(i, 0, true, false, false);
2689 /* Called from the IO thread. Only called for the root sink in volume sharing
2690 * cases, except for internal recursive calls. */
/* Applies a synced volume update on this sink and recurses into all filter
 * sinks that share volume with it. */
2691 static void set_shared_volume_within_thread(pa_sink *s) {
2692 pa_sink_input *i = NULL;
2695 pa_sink_assert_ref(s);
/* Invoke this sink's own SET_VOLUME_SYNCED handling directly. */
2697 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2699 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2700 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2701 set_shared_volume_within_thread(i->origin_sink);
2705 /* Called from IO thread, except when it is not */
/* Central message handler for the sink's asyncmsgq: add/remove/move of sink
 * inputs, volume/mute synchronization, state changes and latency queries.
 * NOTE(review): extraction is missing many lines throughout (breaks,
 * returns, braces, some declarations) — comments describe visible code. */
2706 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2707 pa_sink *s = PA_SINK(o);
2708 pa_sink_assert_ref(s);
2710 switch ((pa_sink_message_t) code) {
2712 case PA_SINK_MESSAGE_ADD_INPUT: {
2713 pa_sink_input *i = PA_SINK_INPUT(userdata);
2715 /* If you change anything here, make sure to change the
2716 * sink input handling a few lines down at
2717 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2719 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2721 /* Since the caller sleeps in pa_sink_input_put(), we can
2722 * safely access data outside of thread_info even though
2725 if ((i->thread_info.sync_prev = i->sync_prev)) {
2726 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2727 pa_assert(i->sync_prev->sync_next == i);
2728 i->thread_info.sync_prev->thread_info.sync_next = i;
2731 if ((i->thread_info.sync_next = i->sync_next)) {
2732 pa_assert(i->sink == i->thread_info.sync_next->sink);
2733 pa_assert(i->sync_next->sync_prev == i);
2734 i->thread_info.sync_next->thread_info.sync_prev = i;
2737 pa_sink_input_attach(i);
2739 pa_sink_input_set_state_within_thread(i, i->state);
2741 /* The requested latency of the sink input needs to be fixed up and
2742 * then configured on the sink. If this causes the sink latency to
2743 * go down, the sink implementor is responsible for doing a rewind
2744 * in the update_requested_latency() callback to ensure that the
2745 * sink buffer doesn't contain more data than what the new latency
2748 * XXX: Does it really make sense to push this responsibility to
2749 * the sink implementors? Wouldn't it be better to do it once in
2750 * the core than many times in the modules? */
2752 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2753 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2755 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2756 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2758 /* We don't rewind here automatically. This is left to the
2759 * sink input implementor because some sink inputs need a
2760 * slow start, i.e. need some time to buffer client
2761 * samples before beginning streaming.
2763 * XXX: Does it really make sense to push this functionality to
2764 * the sink implementors? Wouldn't it be better to do it once in
2765 * the core than many times in the modules? */
2767 /* In flat volume mode we need to update the volume as
2769 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2772 case PA_SINK_MESSAGE_REMOVE_INPUT: {
2773 pa_sink_input *i = PA_SINK_INPUT(userdata);
2775 /* If you change anything here, make sure to change the
2776 * sink input handling a few lines down at
2777 * PA_SINK_MESSAGE_START_MOVE, too. */
2779 pa_sink_input_detach(i);
2781 pa_sink_input_set_state_within_thread(i, i->state);
2783 /* Since the caller sleeps in pa_sink_input_unlink(),
2784 * we can safely access data outside of thread_info even
2785 * though it is mutable */
2787 pa_assert(!i->sync_prev);
2788 pa_assert(!i->sync_next);
/* Unchain this input from any synchronized-stream sibling list. */
2790 if (i->thread_info.sync_prev) {
2791 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2792 i->thread_info.sync_prev = NULL;
2795 if (i->thread_info.sync_next) {
2796 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2797 i->thread_info.sync_next = NULL;
2800 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2801 pa_sink_invalidate_requested_latency(s, true);
2802 pa_sink_request_rewind(s, (size_t) -1);
2804 /* In flat volume mode we need to update the volume as
2806 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2809 case PA_SINK_MESSAGE_START_MOVE: {
2810 pa_sink_input *i = PA_SINK_INPUT(userdata);
2812 /* We don't support moving synchronized streams. */
2813 pa_assert(!i->sync_prev);
2814 pa_assert(!i->sync_next);
2815 pa_assert(!i->thread_info.sync_next);
2816 pa_assert(!i->thread_info.sync_prev);
2818 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2820 size_t sink_nbytes, total_nbytes;
2822 /* The old sink probably has some audio from this
2823 * stream in its buffer. We want to "take it back" as
2824 * much as possible and play it to the new sink. We
2825 * don't know at this point how much the old sink can
2826 * rewind. We have to pick something, and that
2827 * something is the full latency of the old sink here.
2828 * So we rewind the stream buffer by the sink latency
2829 * amount, which may be more than what we should
2830 * rewind. This can result in a chunk of audio being
2831 * played both to the old sink and the new sink.
2833 * FIXME: Fix this code so that we don't have to make
2834 * guesses about how much the sink will actually be
2835 * able to rewind. If someone comes up with a solution
2836 * for this, something to note is that the part of the
2837 * latency that the old sink couldn't rewind should
2838 * ideally be compensated after the stream has moved
2839 * to the new sink by adding silence. The new sink
2840 * most likely can't start playing the moved stream
2841 * immediately, and that gap should be removed from
2842 * the "compensation silence" (at least at the time of
2843 * writing this, the move finish code will actually
2844 * already take care of dropping the new sink's
2845 * unrewindable latency, so taking into account the
2846 * unrewindable latency of the old sink is the only
2849 * The render_memblockq contents are discarded,
2850 * because when the sink changes, the format of the
2851 * audio stored in the render_memblockq may change
2852 * too, making the stored audio invalid. FIXME:
2853 * However, the read and write indices are moved back
2854 * the same amount, so if they are not the same now,
2855 * they won't be the same after the rewind either. If
2856 * the write index of the render_memblockq is ahead of
2857 * the read index, then the render_memblockq will feed
2858 * the new sink some silence first, which it shouldn't
2859 * do. The write index should be flushed to be the
2860 * same as the read index. */
2862 /* Get the latency of the sink */
2863 usec = pa_sink_get_latency_within_thread(s, false);
2864 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2865 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
2867 if (total_nbytes > 0) {
2868 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
2869 i->thread_info.rewrite_flush = true;
2870 pa_sink_input_process_rewind(i, sink_nbytes);
2874 pa_sink_input_detach(i);
2876 /* Let's remove the sink input ...*/
2877 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2879 pa_sink_invalidate_requested_latency(s, true);
2881 pa_log_debug("Requesting rewind due to started move");
2882 pa_sink_request_rewind(s, (size_t) -1);
2884 /* In flat volume mode we need to update the volume as
2886 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2889 case PA_SINK_MESSAGE_FINISH_MOVE: {
2890 pa_sink_input *i = PA_SINK_INPUT(userdata);
2892 /* We don't support moving synchronized streams. */
2893 pa_assert(!i->sync_prev);
2894 pa_assert(!i->sync_next);
2895 pa_assert(!i->thread_info.sync_next);
2896 pa_assert(!i->thread_info.sync_prev);
2898 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2900 pa_sink_input_attach(i);
2902 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2906 /* In the ideal case the new sink would start playing
2907 * the stream immediately. That requires the sink to
2908 * be able to rewind all of its latency, which usually
2909 * isn't possible, so there will probably be some gap
2910 * before the moved stream becomes audible. We then
2911 * have two possibilities: 1) start playing the stream
2912 * from where it is now, or 2) drop the unrewindable
2913 * latency of the sink from the stream. With option 1
2914 * we won't lose any audio but the stream will have a
2915 * pause. With option 2 we may lose some audio but the
2916 * stream time will be somewhat in sync with the wall
2917 * clock. Lennart seems to have chosen option 2 (one
2918 * of the reasons might have been that option 1 is
2919 * actually much harder to implement), so we drop the
2920 * latency of the new sink from the moved stream and
2921 * hope that the sink will undo most of that in the
2924 /* Get the latency of the sink */
2925 usec = pa_sink_get_latency_within_thread(s, false);
2926 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2929 pa_sink_input_drop(i, nbytes);
2931 pa_log_debug("Requesting rewind due to finished move");
2932 pa_sink_request_rewind(s, nbytes);
2935 /* Updating the requested sink latency has to be done
2936 * after the sink rewind request, not before, because
2937 * otherwise the sink may limit the rewind amount
2940 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2941 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2943 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2944 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2946 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2949 case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
2950 pa_sink *root_sink = pa_sink_get_master(s);
2952 if (PA_LIKELY(root_sink))
2953 set_shared_volume_within_thread(root_sink);
2958 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
/* Deferred-volume sinks queue the HW write; then fall through to apply
 * the soft volume like a plain SET_VOLUME. */
2960 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2962 pa_sink_volume_change_push(s);
2964 /* Fall through ... */
2966 case PA_SINK_MESSAGE_SET_VOLUME:
2968 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2969 s->thread_info.soft_volume = s->soft_volume;
2970 pa_sink_request_rewind(s, (size_t) -1);
2973 /* Fall through ... */
2975 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2976 sync_input_volumes_within_thread(s);
2979 case PA_SINK_MESSAGE_GET_VOLUME:
2981 if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
2983 pa_sink_volume_change_flush(s);
2984 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2987 /* In case sink implementor reset SW volume. */
2988 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2989 s->thread_info.soft_volume = s->soft_volume;
2990 pa_sink_request_rewind(s, (size_t) -1);
2995 case PA_SINK_MESSAGE_SET_MUTE:
2997 if (s->thread_info.soft_muted != s->muted) {
2998 s->thread_info.soft_muted = s->muted;
2999 pa_sink_request_rewind(s, (size_t) -1);
3002 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
3007 case PA_SINK_MESSAGE_GET_MUTE:
3009 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
3010 return s->get_mute(s, userdata);
3014 case PA_SINK_MESSAGE_SET_STATE: {
3015 struct set_state_data *data = userdata;
/* Track whether we cross into or out of SUSPENDED, so the inputs can
 * be notified below. */
3016 bool suspend_change =
3017 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(data->state)) ||
3018 (PA_SINK_IS_OPENED(s->thread_info.state) && data->state == PA_SINK_SUSPENDED);
3020 if (s->set_state_in_io_thread) {
3023 if ((r = s->set_state_in_io_thread(s, data->state, data->suspend_cause)) < 0)
3027 s->thread_info.state = data->state;
/* A suspended sink has no pending rewind. */
3029 if (s->thread_info.state == PA_SINK_SUSPENDED) {
3030 s->thread_info.rewind_nbytes = 0;
3031 s->thread_info.rewind_requested = false;
3034 if (suspend_change) {
3038 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
3039 if (i->suspend_within_thread)
3040 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
3046 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
3048 pa_usec_t *usec = userdata;
3049 *usec = pa_sink_get_requested_latency_within_thread(s);
3051 /* Yes, that's right, the IO thread will see -1 when no
3052 * explicit requested latency is configured, the main
3053 * thread will see max_latency */
3054 if (*usec == (pa_usec_t) -1)
3055 *usec = s->thread_info.max_latency;
3060 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
3061 pa_usec_t *r = userdata;
3063 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
3068 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
3069 pa_usec_t *r = userdata;
3071 r[0] = s->thread_info.min_latency;
3072 r[1] = s->thread_info.max_latency;
3077 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
3079 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
3082 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
3084 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
3087 case PA_SINK_MESSAGE_GET_MAX_REWIND:
3089 *((size_t*) userdata) = s->thread_info.max_rewind;
3092 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
3094 *((size_t*) userdata) = s->thread_info.max_request;
3097 case PA_SINK_MESSAGE_SET_MAX_REWIND:
3099 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
3102 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
3104 pa_sink_set_max_request_within_thread(s, (size_t) offset);
3107 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
3108 /* This message is sent from IO-thread and handled in main thread. */
3109 pa_assert_ctl_context();
3111 /* Make sure we're not messing with main thread when no longer linked */
3112 if (!PA_SINK_IS_LINKED(s->state))
3115 pa_sink_get_volume(s, true);
3116 pa_sink_get_mute(s, true);
3119 case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
3120 s->thread_info.port_latency_offset = offset;
3123 case PA_SINK_MESSAGE_GET_LATENCY:
3124 case PA_SINK_MESSAGE_MAX:
3131 /* Called from main thread */
/* Suspends or resumes every sink of the core for the given cause; collects
 * the first error. NOTE(review): extraction is missing the sink/idx/ret
 * declarations, the ret update, and the final return. */
3132 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
3137 pa_core_assert_ref(c);
3138 pa_assert_ctl_context();
3139 pa_assert(cause != 0);
3141 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
3144 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
3151 /* Called from IO thread */
3152 void pa_sink_detach_within_thread(pa_sink *s) {
3156 pa_sink_assert_ref(s);
3157 pa_sink_assert_io_context(s);
3158 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3160 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3161 pa_sink_input_detach(i);
3163 if (s->monitor_source)
3164 pa_source_detach_within_thread(s->monitor_source);
3167 /* Called from IO thread */
3168 void pa_sink_attach_within_thread(pa_sink *s) {
3172 pa_sink_assert_ref(s);
3173 pa_sink_assert_io_context(s);
3174 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3176 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3177 pa_sink_input_attach(i);
3179 if (s->monitor_source)
3180 pa_source_attach_within_thread(s->monitor_source);
3183 /* Called from IO thread */
/* Requests that the sink rewinds its playback buffer by up to `nbytes`
 * ((size_t)-1 means "as much as possible"); the amount is clamped to
 * max_rewind and only ever grown, never shrunk, across pending requests.
 * NOTE(review): extraction is missing an early `return` after the
 * already-requested check. */
3184 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
3185 pa_sink_assert_ref(s);
3186 pa_sink_assert_io_context(s);
3187 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3189 if (nbytes == (size_t) -1)
3190 nbytes = s->thread_info.max_rewind;
3192 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
/* A pending request already covers this amount — nothing to do. */
3194 if (s->thread_info.rewind_requested &&
3195 nbytes <= s->thread_info.rewind_nbytes)
3198 s->thread_info.rewind_nbytes = nbytes;
3199 s->thread_info.rewind_requested = true;
/* Let the implementor react immediately if it installed a callback. */
3201 if (s->request_rewind)
3202 s->request_rewind(s);
3205 /* Called from IO thread */
/* Computes the effective requested latency: the minimum over all inputs and
 * the monitor source, clamped to [min_latency, max_latency]; (pa_usec_t)-1
 * means no explicit request. The result is cached once the sink is linked.
 * NOTE(review): extraction is missing the i/state declarations and the final
 * `return result;`. */
3206 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
3207 pa_usec_t result = (pa_usec_t) -1;
3210 pa_usec_t monitor_latency;
3212 pa_sink_assert_ref(s);
3213 pa_sink_assert_io_context(s);
/* Fixed-latency sinks: no per-stream negotiation, just clamp. */
3215 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3216 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
/* Serve from cache when still valid. */
3218 if (s->thread_info.requested_latency_valid)
3219 return s->thread_info.requested_latency;
3221 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3222 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3223 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3224 result = i->thread_info.requested_sink_latency;
3226 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3228 if (monitor_latency != (pa_usec_t) -1 &&
3229 (result == (pa_usec_t) -1 || result > monitor_latency))
3230 result = monitor_latency;
3232 if (result != (pa_usec_t) -1)
3233 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3235 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3236 /* Only cache if properly initialized */
3237 s->thread_info.requested_latency = result;
3238 s->thread_info.requested_latency_valid = true;
3244 /* Called from main thread */
/* Main-thread accessor for the requested latency; while suspended it returns
 * the cached value, otherwise it queries the IO thread synchronously.
 * NOTE(review): extraction is missing the `pa_usec_t usec` declaration and
 * the return statements. */
3245 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3248 pa_sink_assert_ref(s);
3249 pa_assert_ctl_context();
3250 pa_assert(PA_SINK_IS_LINKED(s->state));
/* While suspended the IO thread cannot answer; use cached data. */
3252 if (s->state == PA_SINK_SUSPENDED)
3255 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3260 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Records the new maximum rewind and forwards it to all linked inputs and
 * the monitor source. NOTE(review): extraction is missing the i/state
 * declarations and an early return. */
3261 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3265 pa_sink_assert_ref(s);
3266 pa_sink_assert_io_context(s);
/* Unchanged value: nothing to propagate. */
3268 if (max_rewind == s->thread_info.max_rewind)
3271 s->thread_info.max_rewind = max_rewind;
3273 if (PA_SINK_IS_LINKED(s->thread_info.state))
3274 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3275 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3277 if (s->monitor_source)
3278 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3281 /* Called from main thread */
3282 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3283 pa_sink_assert_ref(s);
3284 pa_assert_ctl_context();
3286 if (PA_SINK_IS_LINKED(s->state))
3287 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3289 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3292 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Records the new maximum request size and forwards it to all linked inputs.
 * NOTE(review): extraction is missing the i/state declarations and an early
 * return. */
3293 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3296 pa_sink_assert_ref(s);
3297 pa_sink_assert_io_context(s);
/* Unchanged value: nothing to propagate. */
3299 if (max_request == s->thread_info.max_request)
3302 s->thread_info.max_request = max_request;
3304 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3307 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3308 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3312 /* Called from main thread */
3313 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3314 pa_sink_assert_ref(s);
3315 pa_assert_ctl_context();
3317 if (PA_SINK_IS_LINKED(s->state))
3318 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3320 pa_sink_set_max_request_within_thread(s, max_request);
3323 /* Called from IO thread */
/* Drops the cached requested latency and notifies the implementor and all
 * inputs so they can recompute. NOTE(review): extraction is missing the
 * i/state declarations and the else/early-return branch for the
 * non-dynamic-latency case. */
3324 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
3328 pa_sink_assert_ref(s);
3329 pa_sink_assert_io_context(s);
3331 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3332 s->thread_info.requested_latency_valid = false;
3336 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3338 if (s->update_requested_latency)
3339 s->update_requested_latency(s);
3341 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3342 if (i->update_sink_requested_latency)
3343 i->update_sink_requested_latency(i);
3347 /* Called from main thread */
3348 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3349 pa_sink_assert_ref(s);
3350 pa_assert_ctl_context();
3352 /* min_latency == 0: no limit
3353 * min_latency anything else: specified limit
3355 * Similar for max_latency */
3357 if (min_latency < ABSOLUTE_MIN_LATENCY)
3358 min_latency = ABSOLUTE_MIN_LATENCY;
3360 if (max_latency <= 0 ||
3361 max_latency > ABSOLUTE_MAX_LATENCY)
3362 max_latency = ABSOLUTE_MAX_LATENCY;
3364 pa_assert(min_latency <= max_latency);
3366 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3367 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3368 max_latency == ABSOLUTE_MAX_LATENCY) ||
3369 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3371 if (PA_SINK_IS_LINKED(s->state)) {
3377 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3379 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3382 /* Called from main thread */
3383 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3384 pa_sink_assert_ref(s);
3385 pa_assert_ctl_context();
3386 pa_assert(min_latency);
3387 pa_assert(max_latency);
3389 if (PA_SINK_IS_LINKED(s->state)) {
3390 pa_usec_t r[2] = { 0, 0 };
3392 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3394 *min_latency = r[0];
3395 *max_latency = r[1];
3397 *min_latency = s->thread_info.min_latency;
3398 *max_latency = s->thread_info.max_latency;
3402 /* Called from IO thread */
3403 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3404 pa_sink_assert_ref(s);
3405 pa_sink_assert_io_context(s);
3407 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3408 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3409 pa_assert(min_latency <= max_latency);
3411 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3412 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3413 max_latency == ABSOLUTE_MAX_LATENCY) ||
3414 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3416 if (s->thread_info.min_latency == min_latency &&
3417 s->thread_info.max_latency == max_latency)
3420 s->thread_info.min_latency = min_latency;
3421 s->thread_info.max_latency = max_latency;
3423 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3427 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3428 if (i->update_sink_latency_range)
3429 i->update_sink_latency_range(i);
3432 pa_sink_invalidate_requested_latency(s, false);
3434 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3437 /* Called from main thread */
3438 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3439 pa_sink_assert_ref(s);
3440 pa_assert_ctl_context();
3442 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3443 pa_assert(latency == 0);
3447 if (latency < ABSOLUTE_MIN_LATENCY)
3448 latency = ABSOLUTE_MIN_LATENCY;
3450 if (latency > ABSOLUTE_MAX_LATENCY)
3451 latency = ABSOLUTE_MAX_LATENCY;
3453 if (PA_SINK_IS_LINKED(s->state))
3454 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3456 s->thread_info.fixed_latency = latency;
3458 pa_source_set_fixed_latency(s->monitor_source, latency);
3461 /* Called from main thread */
3462 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3465 pa_sink_assert_ref(s);
3466 pa_assert_ctl_context();
3468 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3471 if (PA_SINK_IS_LINKED(s->state))
3472 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3474 latency = s->thread_info.fixed_latency;
3479 /* Called from IO thread */
3480 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3481 pa_sink_assert_ref(s);
3482 pa_sink_assert_io_context(s);
3484 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3485 pa_assert(latency == 0);
3486 s->thread_info.fixed_latency = 0;
3488 if (s->monitor_source)
3489 pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
3494 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3495 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3497 if (s->thread_info.fixed_latency == latency)
3500 s->thread_info.fixed_latency = latency;
3502 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3506 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3507 if (i->update_sink_fixed_latency)
3508 i->update_sink_fixed_latency(i);
3511 pa_sink_invalidate_requested_latency(s, false);
3513 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3516 /* Called from main context */
3517 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3518 pa_sink_assert_ref(s);
3520 s->port_latency_offset = offset;
3522 if (PA_SINK_IS_LINKED(s->state))
3523 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3525 s->thread_info.port_latency_offset = offset;
3527 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3530 /* Called from main context */
3531 size_t pa_sink_get_max_rewind(pa_sink *s) {
3533 pa_assert_ctl_context();
3534 pa_sink_assert_ref(s);
3536 if (!PA_SINK_IS_LINKED(s->state))
3537 return s->thread_info.max_rewind;
3539 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3544 /* Called from main context */
3545 size_t pa_sink_get_max_request(pa_sink *s) {
3547 pa_sink_assert_ref(s);
3548 pa_assert_ctl_context();
3550 if (!PA_SINK_IS_LINKED(s->state))
3551 return s->thread_info.max_request;
3553 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3558 /* Called from main context */
3559 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3560 pa_device_port *port;
3562 pa_sink_assert_ref(s);
3563 pa_assert_ctl_context();
3566 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3567 return -PA_ERR_NOTIMPLEMENTED;
3571 return -PA_ERR_NOENTITY;
3573 if (!(port = pa_hashmap_get(s->ports, name)))
3574 return -PA_ERR_NOENTITY;
3576 if (s->active_port == port) {
3577 s->save_port = s->save_port || save;
3581 s->port_changing = true;
3583 if (s->set_port(s, port) < 0)
3584 return -PA_ERR_NOENTITY;
3586 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3588 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3590 s->active_port = port;
3591 s->save_port = save;
3593 pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3595 /* The active port affects the default sink selection. */
3596 pa_core_update_default_sink(s->core);
3598 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
3600 s->port_changing = false;
3605 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3606 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3610 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3613 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3615 if (pa_streq(ff, "microphone"))
3616 t = "audio-input-microphone";
3617 else if (pa_streq(ff, "webcam"))
3619 else if (pa_streq(ff, "computer"))
3621 else if (pa_streq(ff, "handset"))
3623 else if (pa_streq(ff, "portable"))
3624 t = "multimedia-player";
3625 else if (pa_streq(ff, "tv"))
3626 t = "video-display";
3629 * The following icons are not part of the icon naming spec,
3630 * because Rodney Dawes sucks as the maintainer of that spec.
3632 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3634 else if (pa_streq(ff, "headset"))
3635 t = "audio-headset";
3636 else if (pa_streq(ff, "headphone"))
3637 t = "audio-headphones";
3638 else if (pa_streq(ff, "speaker"))
3639 t = "audio-speakers";
3640 else if (pa_streq(ff, "hands-free"))
3641 t = "audio-handsfree";
3645 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3646 if (pa_streq(c, "modem"))
3653 t = "audio-input-microphone";
3656 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3657 if (strstr(profile, "analog"))
3659 else if (strstr(profile, "iec958"))
3661 else if (strstr(profile, "hdmi"))
3665 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3667 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3672 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3673 const char *s, *d = NULL, *k;
3676 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3680 if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3684 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3685 if (pa_streq(s, "internal"))
3686 d = _("Built-in Audio");
3689 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3690 if (pa_streq(s, "modem"))
3694 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3699 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3702 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3704 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3709 bool pa_device_init_intended_roles(pa_proplist *p) {
3713 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3716 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3717 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3718 || pa_streq(s, "headset")) {
3719 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3726 unsigned pa_device_init_priority(pa_proplist *p) {
3728 unsigned priority = 0;
3732 /* JACK sinks and sources get very high priority so that we'll switch the
3733 * default devices automatically when jackd starts and
3734 * module-jackdbus-detect creates the jack sink and source. */
3735 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_API))) {
3736 if (pa_streq(s, "jack"))
3740 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3742 if (pa_streq(s, "sound"))
3744 else if (!pa_streq(s, "modem"))
3748 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3750 if (pa_streq(s, "headphone"))
3752 else if (pa_streq(s, "hifi"))
3754 else if (pa_streq(s, "speaker"))
3756 else if (pa_streq(s, "portable"))
3760 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3762 if (pa_streq(s, "bluetooth"))
3764 else if (pa_streq(s, "usb"))
3766 else if (pa_streq(s, "pci"))
3770 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3772 if (pa_startswith(s, "analog-")) {
3775 /* If an analog device has an intended role of "phone", it probably
3776 * co-exists with another device that is meant for everything else,
3777 * and that other device should have higher priority than the phone
3779 if (pa_str_in_list_spaces(pa_proplist_gets(p, PA_PROP_DEVICE_INTENDED_ROLES), "phone"))
3782 else if (pa_startswith(s, "iec958-"))
3789 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3791 /* Called from the IO thread. */
3792 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3793 pa_sink_volume_change *c;
3794 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3795 c = pa_xnew(pa_sink_volume_change, 1);
3797 PA_LLIST_INIT(pa_sink_volume_change, c);
3799 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3803 /* Called from the IO thread. */
3804 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3806 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3810 /* Called from the IO thread. */
3811 void pa_sink_volume_change_push(pa_sink *s) {
3812 pa_sink_volume_change *c = NULL;
3813 pa_sink_volume_change *nc = NULL;
3814 pa_sink_volume_change *pc = NULL;
3815 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3817 const char *direction = NULL;
3820 nc = pa_sink_volume_change_new(s);
3822 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3823 * Adding one more volume for HW would get us rid of this, but I am trying
3824 * to survive with the ones we already have. */
3825 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
3827 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3828 pa_log_debug("Volume not changing");
3829 pa_sink_volume_change_free(nc);
3833 nc->at = pa_sink_get_latency_within_thread(s, false);
3834 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3836 if (s->thread_info.volume_changes_tail) {
3837 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3838 /* If volume is going up let's do it a bit late. If it is going
3839 * down let's do it a bit early. */
3840 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3841 if (nc->at + safety_margin > c->at) {
3842 nc->at += safety_margin;
3847 else if (nc->at - safety_margin > c->at) {
3848 nc->at -= safety_margin;
3856 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3857 nc->at += safety_margin;
3860 nc->at -= safety_margin;
3863 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3866 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3869 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3871 /* We can ignore volume events that came earlier but should happen later than this. */
3872 PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
3873 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3874 pa_sink_volume_change_free(c);
3877 s->thread_info.volume_changes_tail = nc;
3880 /* Called from the IO thread. */
3881 static void pa_sink_volume_change_flush(pa_sink *s) {
3882 pa_sink_volume_change *c = s->thread_info.volume_changes;
3884 s->thread_info.volume_changes = NULL;
3885 s->thread_info.volume_changes_tail = NULL;
3887 pa_sink_volume_change *next = c->next;
3888 pa_sink_volume_change_free(c);
3893 /* Called from the IO thread. */
3894 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3900 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3906 pa_assert(s->write_volume);
3908 now = pa_rtclock_now();
3910 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3911 pa_sink_volume_change *c = s->thread_info.volume_changes;
3912 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3913 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3914 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3916 s->thread_info.current_hw_volume = c->hw_volume;
3917 pa_sink_volume_change_free(c);
3923 if (s->thread_info.volume_changes) {
3925 *usec_to_next = s->thread_info.volume_changes->at - now;
3926 if (pa_log_ratelimit(PA_LOG_DEBUG))
3927 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3932 s->thread_info.volume_changes_tail = NULL;
3937 /* Called from the IO thread. */
3938 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3939 /* All the queued volume events later than current latency are shifted to happen earlier. */
3940 pa_sink_volume_change *c;
3941 pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3942 pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3943 pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
3945 pa_log_debug("latency = %lld", (long long) limit);
3946 limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3948 PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3949 pa_usec_t modified_limit = limit;
3950 if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3951 modified_limit -= s->thread_info.volume_change_safety_margin;
3953 modified_limit += s->thread_info.volume_change_safety_margin;
3954 if (c->at > modified_limit) {
3956 if (c->at < modified_limit)
3957 c->at = modified_limit;
3959 prev_vol = pa_cvolume_avg(&c->hw_volume);
3961 pa_sink_volume_change_apply(s, NULL);
3964 /* Called from the main thread */
3965 /* Gets the list of formats supported by the sink. The members and idxset must
3966 * be freed by the caller. */
3967 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3972 if (s->get_formats) {
3973 /* Sink supports format query, all is good */
3974 ret = s->get_formats(s);
3976 /* Sink doesn't support format query, so assume it does PCM */
3977 pa_format_info *f = pa_format_info_new();
3978 f->encoding = PA_ENCODING_PCM;
3980 ret = pa_idxset_new(NULL, NULL);
3981 pa_idxset_put(ret, f, NULL);
3987 /* Called from the main thread */
3988 /* Allows an external source to set what formats a sink supports if the sink
3989 * permits this. The function makes a copy of the formats on success. */
3990 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3995 /* Sink supports setting formats -- let's give it a shot */
3996 return s->set_formats(s, formats);
3998 /* Sink doesn't support setting this -- bail out */
4002 /* Called from the main thread */
4003 /* Checks if the sink can accept this format */
4004 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
4005 pa_idxset *formats = NULL;
4011 formats = pa_sink_get_formats(s);
4014 pa_format_info *finfo_device;
4017 PA_IDXSET_FOREACH(finfo_device, formats, i) {
4018 if (pa_format_info_is_compatible(finfo_device, f)) {
4024 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
4030 /* Called from the main thread */
4031 /* Calculates the intersection between formats supported by the sink and
4032 * in_formats, and returns these, in the order of the sink's formats. */
4033 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
4034 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
4035 pa_format_info *f_sink, *f_in;
4040 if (!in_formats || pa_idxset_isempty(in_formats))
4043 sink_formats = pa_sink_get_formats(s);
4045 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
4046 PA_IDXSET_FOREACH(f_in, in_formats, j) {
4047 if (pa_format_info_is_compatible(f_sink, f_in))
4048 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
4054 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
4059 /* Called from the main thread */
4060 void pa_sink_set_sample_format(pa_sink *s, pa_sample_format_t format) {
4061 pa_sample_format_t old_format;
4064 pa_assert(pa_sample_format_valid(format));
4066 old_format = s->sample_spec.format;
4067 if (old_format == format)
4070 pa_log_info("%s: format: %s -> %s",
4071 s->name, pa_sample_format_to_string(old_format), pa_sample_format_to_string(format));
4073 s->sample_spec.format = format;
4075 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4078 /* Called from the main thread */
4079 void pa_sink_set_sample_rate(pa_sink *s, uint32_t rate) {
4083 pa_assert(pa_sample_rate_valid(rate));
4085 old_rate = s->sample_spec.rate;
4086 if (old_rate == rate)
4089 pa_log_info("%s: rate: %u -> %u", s->name, old_rate, rate);
4091 s->sample_spec.rate = rate;
4093 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4096 /* Called from the main thread. */
4097 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
4098 pa_cvolume old_volume;
4099 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4100 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4105 old_volume = s->reference_volume;
4107 if (pa_cvolume_equal(volume, &old_volume))
4110 s->reference_volume = *volume;
4111 pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
4112 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
4113 s->flags & PA_SINK_DECIBEL_VOLUME),
4114 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
4115 s->flags & PA_SINK_DECIBEL_VOLUME));
4117 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4118 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);
4121 void pa_sink_move_streams_to_default_sink(pa_core *core, pa_sink *old_sink, bool default_sink_changed) {
4126 pa_assert(old_sink);
4128 if (core->state == PA_CORE_SHUTDOWN)
4131 if (core->default_sink == NULL || core->default_sink->unlink_requested)
4134 if (old_sink == core->default_sink)
4137 PA_IDXSET_FOREACH(i, old_sink->inputs, idx) {
4138 if (!PA_SINK_INPUT_IS_LINKED(i->state))
4144 /* Don't move sink-inputs which connect filter sinks to their target sinks */
4148 /* If default_sink_changed is false, the old sink became unavailable, so all streams must be moved. */
4149 if (pa_safe_streq(old_sink->name, i->preferred_sink) && default_sink_changed)
4152 if (!pa_sink_input_may_move_to(i, core->default_sink))
4155 if (default_sink_changed)
4156 pa_log_info("The sink input %u \"%s\" is moving to %s due to change of the default sink.",
4157 i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
4159 pa_log_info("The sink input %u \"%s\" is moving to %s, because the old sink became unavailable.",
4160 i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
4162 pa_sink_input_move_to(i, core->default_sink, false);