2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
32 #include <pulse/introspect.h>
33 #include <pulse/format.h>
34 #include <pulse/utf8.h>
35 #include <pulse/xmalloc.h>
36 #include <pulse/timeval.h>
37 #include <pulse/util.h>
38 #include <pulse/rtclock.h>
39 #include <pulse/internal.h>
41 #include <pulsecore/i18n.h>
42 #include <pulsecore/sink-input.h>
43 #include <pulsecore/namereg.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/mix.h>
47 #include <pulsecore/core-subscribe.h>
48 #include <pulsecore/log.h>
49 #include <pulsecore/macro.h>
50 #include <pulsecore/play-memblockq.h>
51 #include <pulsecore/flist.h>
53 #include <pulsecore/proplist-util.h>
58 #define MAX_MIX_CHANNELS 32
59 #define MIX_BUFFER_LENGTH (pa_page_size())
60 #define ABSOLUTE_MIN_LATENCY (500)
61 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
62 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
64 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
/* One queued deferred (hardware) volume change for a sink; entries are
 * chained through PA_LLIST_FIELDS into s->thread_info.volume_changes.
 * NOTE(review): the struct's data members are missing from this
 * extraction — only the open brace and list-linkage line survive. */
66 struct pa_sink_volume_change {
70 PA_LLIST_FIELDS(pa_sink_volume_change);
/* Payload for the PA_SINK_MESSAGE_SET_STATE message sent to the IO
 * thread from sink_set_state(): the target state plus the new suspend
 * cause bitmask. */
73 struct set_state_data {
74 pa_sink_state_t state;
75 pa_suspend_cause_t suspend_cause;
78 static void sink_free(pa_object *s);
80 static void pa_sink_volume_change_push(pa_sink *s);
81 static void pa_sink_volume_change_flush(pa_sink *s);
82 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/* TIZEN_PCM_DUMP helper: lazily opens a raw PCM dump file for this sink
 * when PA_PCM_DUMP_SINK is enabled and the sink is RUNNING, closes it
 * again when the config bit is cleared, and appends every rendered
 * chunk to the open file.
 * NOTE(review): this extraction is missing interior lines (the locals
 * 'now', 'tm', 'datetime', 'ptr', several braces/else lines) — treat
 * the visible code as a partial transcript, not compilable source. */
85 static void pa_sink_write_pcm_dump(pa_sink *s, pa_memchunk *chunk)
87 char *dump_time = NULL, *dump_path_surfix = NULL;
88 const char *s_device_api_str, *card_name_str, *device_idx_str;
93 /* open file for dump pcm */
94 if (s->core->pcm_dump & PA_PCM_DUMP_SINK && !s->pcm_dump_fp && s->state == PA_SINK_RUNNING) {
95 pa_gettimeofday(&now);
96 localtime_r(&now.tv_sec, &tm);
97 memset(&datetime[0], 0x00, sizeof(datetime));
98 strftime(&datetime[0], sizeof(datetime), "%H%M%S", &tm);
99 dump_time = pa_sprintf_malloc("%s.%03ld", &datetime[0], now.tv_usec / 1000);
/* Build a filename suffix describing the device: "<card>.<dev>" for
 * ALSA sinks, otherwise the device API string or the sink name. */
101 if ((s_device_api_str = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_API))) {
102 if (pa_streq(s_device_api_str, "alsa")) {
103 card_name_str = pa_proplist_gets(s->proplist, "alsa.card_name");
104 device_idx_str = pa_proplist_gets(s->proplist, "alsa.device");
105 dump_path_surfix = pa_sprintf_malloc("%s.%s", pa_strnull(card_name_str), pa_strnull(device_idx_str));
107 dump_path_surfix = pa_sprintf_malloc("%s", s_device_api_str);
110 dump_path_surfix = pa_sprintf_malloc("%s", s->name);
113 s->dump_path = pa_sprintf_malloc("%s_%s_pa-sink%d-%s_%dch_%d.raw", PA_PCM_DUMP_PATH_PREFIX, pa_strempty(dump_time),
114 s->index, pa_strempty(dump_path_surfix), s->sample_spec.channels, s->sample_spec.rate);
116 s->pcm_dump_fp = fopen(s->dump_path, "w");
118 pa_log_warn("%s open failed", s->dump_path);
120 pa_log_info("%s opened", s->dump_path);
/* NOTE(review): dump_time is presumably freed on a missing line here —
 * only dump_path_surfix's pa_xfree is visible; confirm against the
 * full source. */
123 pa_xfree(dump_path_surfix);
124 /* close file for dump pcm when config is changed */
125 } else if (~s->core->pcm_dump & PA_PCM_DUMP_SINK && s->pcm_dump_fp) {
126 fclose(s->pcm_dump_fp);
127 pa_log_info("%s closed", s->dump_path);
128 pa_xfree(s->dump_path);
129 s->pcm_dump_fp = NULL;
/* Append the chunk's payload (index..index+length) to the dump file. */
133 if (s->pcm_dump_fp) {
136 ptr = pa_memblock_acquire(chunk->memblock);
138 fwrite((uint8_t *)ptr + chunk->index, 1, chunk->length, s->pcm_dump_fp);
140 pa_log_warn("pa_memblock_acquire is failed. ptr is NULL");
142 pa_memblock_release(chunk->memblock);
/* Initializes a caller-provided pa_sink_new_data to defaults: a fresh
 * empty proplist and a ports hashmap keyed by port name that unrefs
 * ports on removal. Returns 'data' (per upstream convention; the
 * return statement is on a line missing from this extraction). */
147 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
151 data->proplist = pa_proplist_new();
152 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
/* Sets (replacing any previous value) the sink name to register;
 * takes a copy of 'name'. */
157 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
160 pa_xfree(data->name);
161 data->name = pa_xstrdup(name);
/* Sets the desired sample spec; a NULL 'spec' clears the
 * sample_spec_is_set flag instead. */
164 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
167 if ((data->sample_spec_is_set = !!spec))
168 data->sample_spec = *spec;
/* Sets the desired channel map; a NULL 'map' clears the
 * channel_map_is_set flag instead. */
171 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
174 if ((data->channel_map_is_set = !!map))
175 data->channel_map = *map;
/* Sets the alternate sample rate the sink may switch to, and marks it
 * as explicitly provided (overriding the core default). */
178 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
181 data->alternate_sample_rate_is_set = true;
182 data->alternate_sample_rate = alternate_sample_rate;
/* Sets whether the sink should try to avoid resampling, and marks the
 * value as explicitly provided (overriding the core default). */
185 void pa_sink_new_data_set_avoid_resampling(pa_sink_new_data *data, bool avoid_resampling) {
188 data->avoid_resampling_is_set = true;
189 data->avoid_resampling = avoid_resampling;
/* Sets the initial volume; a NULL 'volume' clears the volume_is_set
 * flag instead. */
192 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
195 if ((data->volume_is_set = !!volume))
196 data->volume = *volume;
/* Marks the mute state as explicitly provided.
 * NOTE(review): the line assigning 'mute' into data->muted is missing
 * from this extraction — only the flag assignment is visible. */
199 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
202 data->muted_is_set = true;
/* Sets (replacing any previous value) the name of the port to activate
 * initially; takes a copy of 'port'. */
206 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
209 pa_xfree(data->active_port);
210 data->active_port = pa_xstrdup(port);
/* Releases everything owned by a pa_sink_new_data: the proplist, the
 * ports hashmap (which unrefs each port), and the name/active_port
 * string copies. The struct itself is caller-owned.
 * NOTE(review): the NULL-guards around the frees appear to be on lines
 * missing from this extraction. */
213 void pa_sink_new_data_done(pa_sink_new_data *data) {
216 pa_proplist_free(data->proplist);
219 pa_hashmap_free(data->ports);
221 pa_xfree(data->name);
222 pa_xfree(data->active_port);
225 /* Called from main context */
/* Clears every implementor-supplied callback on the sink so that a
 * freshly created (or torn down) sink has a well-defined all-NULL
 * callback table. NOTE(review): some callback fields (e.g. set_mute,
 * get_mute) sit on lines missing from this extraction. */
226 static void reset_callbacks(pa_sink *s) {
229 s->set_state_in_main_thread = NULL;
230 s->set_state_in_io_thread = NULL;
231 s->get_volume = NULL;
232 s->set_volume = NULL;
233 s->write_volume = NULL;
236 s->request_rewind = NULL;
237 s->update_requested_latency = NULL;
239 s->get_formats = NULL;
240 s->set_formats = NULL;
241 s->reconfigure = NULL;
244 /* Called from main context */
/* First half of sink construction: registers the name in the namereg,
 * fires the SINK_NEW hook, validates/fixates the new-data (sample spec,
 * channel map, volume, proplist, best port), fires SINK_FIXATE, then
 * begins filling in the pa_sink object. Returns NULL on any validation
 * or hook failure (after unregistering the name).
 * NOTE(review): many interior lines are missing from this extraction
 * (locals, else-branches, closing braces); do not treat as compilable. */
245 pa_sink* pa_sink_new(
247 pa_sink_new_data *data,
248 pa_sink_flags_t flags) {
252 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
253 pa_source_new_data source_data;
259 pa_assert(data->name);
260 pa_assert_ctl_context();
262 s = pa_msgobject_new(pa_sink);
264 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
265 pa_log_debug("Failed to register name %s.", data->name);
270 pa_sink_new_data_set_name(data, name);
272 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
274 pa_namereg_unregister(core, name);
278 /* FIXME, need to free s here on failure */
280 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
281 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
283 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
285 if (!data->channel_map_is_set)
286 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
288 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
289 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
291 /* FIXME: There should probably be a general function for checking whether
292 * the sink volume is allowed to be set, like there is for sink inputs. */
293 pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
295 if (!data->volume_is_set) {
296 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
297 data->save_volume = false;
300 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
301 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
303 if (!data->muted_is_set)
307 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
309 pa_device_init_description(data->proplist, data->card);
310 pa_device_init_icon(data->proplist, true);
311 pa_device_init_intended_roles(data->proplist);
/* If no port was requested, default to the highest-priority port. */
313 if (!data->active_port) {
314 pa_device_port *p = pa_device_port_find_best(data->ports);
316 pa_sink_new_data_set_port(data, p->name);
319 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
321 pa_namereg_unregister(core, name);
325 s->parent.parent.free = sink_free;
326 s->parent.process_msg = pa_sink_process_msg;
329 s->state = PA_SINK_INIT;
332 s->suspend_cause = data->suspend_cause;
333 s->name = pa_xstrdup(name);
334 s->proplist = pa_proplist_copy(data->proplist);
335 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
336 s->module = data->module;
337 s->card = data->card;
339 s->priority = pa_device_init_priority(s->proplist);
341 s->sample_spec = data->sample_spec;
342 s->channel_map = data->channel_map;
343 s->default_sample_rate = s->sample_spec.rate;
345 if (data->alternate_sample_rate_is_set)
346 s->alternate_sample_rate = data->alternate_sample_rate;
348 s->alternate_sample_rate = s->core->alternate_sample_rate;
/* Tizen-side and upstream-15.0 variants of the avoid_resampling
 * initialization; the conflict marker that followed this region has
 * been flagged separately. */
351 s->avoid_resampling = data->avoid_resampling;
353 s->origin_avoid_resampling = data->avoid_resampling;
354 s->selected_sample_format = s->sample_spec.format;
355 s->selected_sample_rate = s->sample_spec.rate;
358 if (data->avoid_resampling_is_set)
359 s->avoid_resampling = data->avoid_resampling;
361 s->avoid_resampling = s->core->avoid_resampling;
/* NOTE(review): removed leftover git merge-conflict marker
 * (">>>>>>> upstream/15.0") — unresolved VCS residue that would not
 * compile; verify the surviving avoid_resampling code above is the
 * intended merge resolution. */
/* Second half of sink construction: initializes the input set, volume
 * and port state, TIZEN pcm-dump state, the IO-thread-side
 * thread_info mirror, registers the sink in core->sinks (and the
 * card's sink list), logs creation, and creates the companion monitor
 * source, propagating the sink's latency flags.
 * NOTE(review): many interior lines are missing from this extraction. */
364 s->inputs = pa_idxset_new(NULL, NULL);
366 s->input_to_master = NULL;
368 s->reference_volume = s->real_volume = data->volume;
369 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
370 s->base_volume = PA_VOLUME_NORM;
371 s->n_volume_steps = PA_VOLUME_NORM+1;
372 s->muted = data->muted;
373 s->refresh_volume = s->refresh_muted = false;
380 /* As a minor optimization we just steal the list instead of
382 s->ports = data->ports;
385 s->active_port = NULL;
386 s->save_port = false;
388 if (data->active_port)
389 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
390 s->save_port = data->save_port;
392 /* Hopefully the active port has already been assigned in the previous call
393 to pa_device_port_find_best, but better safe than sorry */
395 s->active_port = pa_device_port_find_best(s->ports);
398 s->port_latency_offset = s->active_port->latency_offset;
400 s->port_latency_offset = 0;
402 s->save_volume = data->save_volume;
403 s->save_muted = data->save_muted;
404 #ifdef TIZEN_PCM_DUMP
405 s->pcm_dump_fp = NULL;
409 pa_silence_memchunk_get(
410 &core->silence_cache,
/* IO-thread-side mirror of the main-thread state; accessed only from
 * the sink's IO thread once the sink is running. */
416 s->thread_info.rtpoll = NULL;
417 s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
418 (pa_free_cb_t) pa_sink_input_unref);
419 s->thread_info.soft_volume = s->soft_volume;
420 s->thread_info.soft_muted = s->muted;
421 s->thread_info.state = s->state;
422 s->thread_info.rewind_nbytes = 0;
423 s->thread_info.rewind_requested = false;
424 s->thread_info.max_rewind = 0;
425 s->thread_info.max_request = 0;
426 s->thread_info.requested_latency_valid = false;
427 s->thread_info.requested_latency = 0;
428 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
429 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
430 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
432 PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
433 s->thread_info.volume_changes_tail = NULL;
434 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
435 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
436 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
437 s->thread_info.port_latency_offset = s->port_latency_offset;
439 /* FIXME: This should probably be moved to pa_sink_put() */
440 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
443 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
445 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
446 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
449 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
450 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Build the "<name>.monitor" source mirroring this sink's spec, map,
 * alternate rate and avoid_resampling setting. */
454 pa_source_new_data_init(&source_data);
455 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
456 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
457 pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
458 pa_source_new_data_set_avoid_resampling(&source_data, s->avoid_resampling);
459 source_data.name = pa_sprintf_malloc("%s.monitor", name);
460 source_data.driver = data->driver;
461 source_data.module = data->module;
462 source_data.card = data->card;
464 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
465 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
466 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
468 s->monitor_source = pa_source_new(core, &source_data,
469 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
470 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
472 pa_source_new_data_done(&source_data);
474 if (!s->monitor_source) {
480 s->monitor_source->monitor_of = s;
482 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
483 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
484 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
489 /* Called from main context */
/* Transitions the sink to 'state' with 'suspend_cause', in both the
 * main thread (set_state_in_main_thread) and the IO thread (via the
 * SET_STATE message). On resume failure the sink falls back to
 * SUSPENDED with cause 0. Fires STATE_CHANGED hooks/subscriptions
 * (except when entering UNLINKED), notifies sink inputs about
 * suspend/resume, and keeps the monitor source's suspend state in
 * sync. Returns 0 on success, negative only when resuming fails.
 * NOTE(review): interior lines (early returns, some braces) are
 * missing from this extraction. */
490 static int sink_set_state(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) {
493 bool suspend_cause_changed;
496 pa_sink_state_t old_state;
497 pa_suspend_cause_t old_suspend_cause;
500 pa_assert_ctl_context();
502 state_changed = state != s->state;
503 suspend_cause_changed = suspend_cause != s->suspend_cause;
505 if (!state_changed && !suspend_cause_changed)
508 suspending = PA_SINK_IS_OPENED(s->state) && state == PA_SINK_SUSPENDED;
509 resuming = s->state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state);
511 /* If we are resuming, suspend_cause must be 0. */
512 pa_assert(!resuming || !suspend_cause);
514 /* Here's something to think about: what to do with the suspend cause if
515 * resuming the sink fails? The old suspend cause will be incorrect, so we
516 * can't use that. On the other hand, if we set no suspend cause (as is the
517 * case currently), then it looks strange to have a sink suspended without
518 * any cause. It might be a good idea to add a new "resume failed" suspend
519 * cause, or it might just add unnecessary complexity, given that the
520 * current approach of not setting any suspend cause works well enough. */
522 if (s->set_state_in_main_thread) {
523 if ((ret = s->set_state_in_main_thread(s, state, suspend_cause)) < 0) {
524 /* set_state_in_main_thread() is allowed to fail only when resuming. */
527 /* If resuming fails, we set the state to SUSPENDED and
528 * suspend_cause to 0. */
529 state = PA_SINK_SUSPENDED;
531 state_changed = false;
532 suspend_cause_changed = suspend_cause != s->suspend_cause;
535 /* We know the state isn't changing. If the suspend cause isn't
536 * changing either, then there's nothing more to do. */
537 if (!suspend_cause_changed)
/* Mirror the transition into the IO thread; on failure, undo the
 * main-thread change by forcing SUSPENDED there too. */
543 struct set_state_data data = { .state = state, .suspend_cause = suspend_cause };
545 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, &data, 0, NULL)) < 0) {
546 /* SET_STATE is allowed to fail only when resuming. */
549 if (s->set_state_in_main_thread)
550 s->set_state_in_main_thread(s, PA_SINK_SUSPENDED, 0);
552 /* If resuming fails, we set the state to SUSPENDED and
553 * suspend_cause to 0. */
554 state = PA_SINK_SUSPENDED;
556 state_changed = false;
557 suspend_cause_changed = suspend_cause != s->suspend_cause;
560 /* We know the state isn't changing. If the suspend cause isn't
561 * changing either, then there's nothing more to do. */
562 if (!suspend_cause_changed)
567 #ifdef TIZEN_PCM_DUMP
568 /* close file for dump pcm */
569 if (s->pcm_dump_fp && (s->core->pcm_dump & PA_PCM_DUMP_SEPARATED) && suspending) {
570 fclose(s->pcm_dump_fp);
571 pa_log_info("%s closed", s->dump_path);
572 pa_xfree(s->dump_path);
573 s->pcm_dump_fp = NULL;
576 old_suspend_cause = s->suspend_cause;
577 if (suspend_cause_changed) {
578 char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
579 char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
581 pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(s->suspend_cause, old_cause_buf),
582 pa_suspend_cause_to_string(suspend_cause, new_cause_buf));
583 s->suspend_cause = suspend_cause;
586 old_state = s->state;
588 pa_log_debug("%s: state: %s -> %s", s->name, pa_sink_state_to_string(s->state), pa_sink_state_to_string(state));
591 /* If we enter UNLINKED state, then we don't send change notifications.
592 * pa_sink_unlink() will send unlink notifications instead. */
593 if (state != PA_SINK_UNLINKED) {
594 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
595 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
599 if (suspending || resuming || suspend_cause_changed) {
603 /* We're suspending or resuming, tell everyone about it */
605 PA_IDXSET_FOREACH(i, s->inputs, idx)
606 if (s->state == PA_SINK_SUSPENDED &&
607 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
608 pa_sink_input_kill(i);
610 i->suspend(i, old_state, old_suspend_cause);
613 if ((suspending || resuming || suspend_cause_changed) && s->monitor_source && state != PA_SINK_UNLINKED)
614 pa_source_sync_suspend(s->monitor_source);
/* Installs the implementor's get_volume callback.
 * NOTE(review): the body is missing from this extraction. */
619 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Installs (or clears) the implementor's set_volume callback and
 * updates PA_SINK_HW_VOLUME_CTRL accordingly; decibel-volume support
 * is re-evaluated, and a change event is posted if flags changed after
 * init. A write_volume callback requires set_volume to stay non-NULL. */
625 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
626 pa_sink_flags_t flags;
629 pa_assert(!s->write_volume || cb);
633 /* Save the current flags so we can tell if they've changed */
637 /* The sink implementor is responsible for setting decibel volume support */
638 s->flags |= PA_SINK_HW_VOLUME_CTRL;
640 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
641 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
642 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
645 /* If the flags have changed after init, let any clients know via a change event */
646 if (s->state != PA_SINK_INIT && flags != s->flags)
647 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Installs (or clears) the implementor's write_volume callback and
 * toggles PA_SINK_DEFERRED_VOLUME to match; posts a change event if
 * flags changed after init. write_volume requires set_volume. */
650 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
651 pa_sink_flags_t flags;
654 pa_assert(!cb || s->set_volume);
656 s->write_volume = cb;
658 /* Save the current flags so we can tell if they've changed */
662 s->flags |= PA_SINK_DEFERRED_VOLUME;
664 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
666 /* If the flags have changed after init, let any clients know via a change event */
667 if (s->state != PA_SINK_INIT && flags != s->flags)
668 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Installs the implementor's get_mute callback.
 * NOTE(review): the body is missing from this extraction. */
671 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
/* Installs (or clears) the implementor's set_mute callback and toggles
 * PA_SINK_HW_MUTE_CTRL to match; posts a change event if flags changed
 * after init. */
677 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
678 pa_sink_flags_t flags;
684 /* Save the current flags so we can tell if they've changed */
688 s->flags |= PA_SINK_HW_MUTE_CTRL;
690 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
692 /* If the flags have changed after init, let any clients know via a change event */
693 if (s->state != PA_SINK_INIT && flags != s->flags)
694 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Toggles PA_SINK_FLAT_VOLUME, but only when the user-level
 * core->flat_volumes preference allows it; posts a change event if
 * flags changed after init. */
697 static void enable_flat_volume(pa_sink *s, bool enable) {
698 pa_sink_flags_t flags;
702 /* Always follow the overall user preference here */
703 enable = enable && s->core->flat_volumes;
705 /* Save the current flags so we can tell if they've changed */
709 s->flags |= PA_SINK_FLAT_VOLUME;
711 s->flags &= ~PA_SINK_FLAT_VOLUME;
713 /* If the flags have changed after init, let any clients know via a change event */
714 if (s->state != PA_SINK_INIT && flags != s->flags)
715 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Toggles PA_SINK_DECIBEL_VOLUME; flat volume follows decibel-volume
 * support (enabled together, disabled together). Posts a change event
 * if flags changed after init. */
718 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
719 pa_sink_flags_t flags;
723 /* Save the current flags so we can tell if they've changed */
727 s->flags |= PA_SINK_DECIBEL_VOLUME;
728 enable_flat_volume(s, true);
730 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
731 enable_flat_volume(s, false);
734 /* If the flags have changed after init, let any clients know via a change event */
735 if (s->state != PA_SINK_INIT && flags != s->flags)
736 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
739 /* Called from main context */
/* Completes sink setup after pa_sink_new(): validates that the
 * implementor initialized required fields and flags consistently,
 * finalizes volume state (decibel/flat volume, volume sharing with a
 * master sink), moves the sink out of INIT (to SUSPENDED if a suspend
 * cause is set, else IDLE), puts the monitor source, fires
 * NEW/SINK_PUT notifications, and updates the default sink. */
740 void pa_sink_put(pa_sink* s) {
741 pa_sink_assert_ref(s);
742 pa_assert_ctl_context();
744 pa_assert(s->state == PA_SINK_INIT);
745 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
747 /* The following fields must be initialized properly when calling _put() */
748 pa_assert(s->asyncmsgq);
749 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
751 /* Generally, flags should be initialized via pa_sink_new(). As a
752 * special exception we allow some volume related flags to be set
753 * between _new() and _put() by the callback setter functions above.
755 * Thus we implement a couple safeguards here which ensure the above
756 * setters were used (or at least the implementor made manual changes
757 * in a compatible way).
759 * Note: All of these flags set here can change over the life time
761 pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
762 pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
763 pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
765 /* XXX: Currently decibel volume is disabled for all sinks that use volume
766 * sharing. When the master sink supports decibel volume, it would be good
767 * to have the flag also in the filter sink, but currently we don't do that
768 * so that the flags of the filter sink never change when it's moved from
769 * a master sink to another. One solution for this problem would be to
770 * remove user-visible volume altogether from filter sinks when volume
771 * sharing is used, but the current approach was easier to implement... */
772 /* We always support decibel volumes in software, otherwise we leave it to
773 * the sink implementor to set this flag as needed.
775 * Note: This flag can also change over the life time of the sink. */
776 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
777 pa_sink_enable_decibel_volume(s, true);
778 s->soft_volume = s->reference_volume;
781 /* If the sink implementor support DB volumes by itself, we should always
782 * try and enable flat volumes too */
783 if ((s->flags & PA_SINK_DECIBEL_VOLUME))
784 enable_flat_volume(s, true);
/* Volume-sharing sinks inherit their volumes from the filter chain's
 * root sink, remapped to this sink's channel map. */
786 if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
787 pa_sink *root_sink = pa_sink_get_master(s);
789 pa_assert(root_sink);
791 s->reference_volume = root_sink->reference_volume;
792 pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
794 s->real_volume = root_sink->real_volume;
795 pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
797 /* We assume that if the sink implementor changed the default
798 * volume they did so in real_volume, because that is the usual
799 * place where they are supposed to place their changes. */
800 s->reference_volume = s->real_volume;
802 s->thread_info.soft_volume = s->soft_volume;
803 s->thread_info.soft_muted = s->muted;
804 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
806 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
807 || (s->base_volume == PA_VOLUME_NORM
808 && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
809 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
810 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
811 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
812 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
814 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
815 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
816 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
818 if (s->suspend_cause)
819 pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED, s->suspend_cause) == 0);
821 pa_assert_se(sink_set_state(s, PA_SINK_IDLE, 0) == 0);
823 pa_source_put(s->monitor_source);
825 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
826 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
828 /* It's good to fire the SINK_PUT hook before updating the default sink,
829 * because module-switch-on-connect will set the new sink as the default
830 * sink, and if we were to call pa_core_update_default_sink() before that,
831 * the default sink might change twice, causing unnecessary stream moving. */
833 pa_core_update_default_sink(s->core);
835 pa_core_move_streams_to_newly_available_preferred_sink(s->core, s);
838 /* Called from main context */
/* Disconnects the sink from the core: fires SINK_UNLINK, unregisters
 * the name, removes the sink from core and card lists, updates the
 * default sink, rescues or kills remaining inputs, unlinks the
 * monitor source, transitions to UNLINKED (keeping the suspend cause),
 * and posts REMOVE/UNLINK_POST notifications. Idempotent via the
 * unlink_requested guard. */
839 void pa_sink_unlink(pa_sink* s) {
841 pa_sink_input *i, PA_UNUSED *j = NULL;
843 pa_sink_assert_ref(s);
844 pa_assert_ctl_context();
846 /* Please note that pa_sink_unlink() does more than simply
847 * reversing pa_sink_put(). It also undoes the registrations
848 * already done in pa_sink_new()! */
850 if (s->unlink_requested)
853 s->unlink_requested = true;
855 linked = PA_SINK_IS_LINKED(s->state);
858 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
860 if (s->state != PA_SINK_UNLINKED)
861 pa_namereg_unregister(s->core, s->name);
862 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
864 pa_core_update_default_sink(s->core);
866 if (linked && s->core->rescue_streams)
867 pa_sink_move_streams_to_default_sink(s->core, s, false);
870 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Any inputs still attached after the rescue attempt are killed. */
872 while ((i = pa_idxset_first(s->inputs, NULL))) {
874 pa_sink_input_kill(i);
878 /* Unlink monitor source before unlinking the sink */
879 if (s->monitor_source)
880 pa_source_unlink(s->monitor_source);
883 /* It's important to keep the suspend cause unchanged when unlinking,
884 * because if we remove the SESSION suspend cause here, the alsa sink
885 * will sync its volume with the hardware while another user is
886 * active, messing up the volume for that other user. */
887 sink_set_state(s, PA_SINK_UNLINKED, s->suspend_cause);
889 s->state = PA_SINK_UNLINKED;
894 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
895 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
899 /* Called from main context */
/* Destructor invoked when the refcount hits zero (installed as
 * parent.parent.free in pa_sink_new): flushes pending volume changes,
 * unrefs the monitor source, frees the input containers, silence
 * memblock, proplist and ports, and closes any open TIZEN pcm-dump
 * file. The sink must already be unlinked. */
900 static void sink_free(pa_object *o) {
901 pa_sink *s = PA_SINK(o);
904 pa_assert_ctl_context();
905 pa_assert(pa_sink_refcnt(s) == 0);
906 pa_assert(!PA_SINK_IS_LINKED(s->state));
908 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
910 pa_sink_volume_change_flush(s);
912 if (s->monitor_source) {
913 pa_source_unref(s->monitor_source);
914 s->monitor_source = NULL;
917 pa_idxset_free(s->inputs, NULL);
918 pa_hashmap_free(s->thread_info.inputs);
920 if (s->silence.memblock)
921 pa_memblock_unref(s->silence.memblock);
927 pa_proplist_free(s->proplist);
930 pa_hashmap_free(s->ports);
932 #ifdef TIZEN_PCM_DUMP
933 /* close file for dump pcm */
934 if (s->pcm_dump_fp) {
935 fclose(s->pcm_dump_fp);
936 pa_log_info("%s closed", s->dump_path);
937 pa_xfree(s->dump_path);
938 s->pcm_dump_fp = NULL;
944 /* Called from main context, and not while the IO thread is active, please */
/* Sets the async message queue used to talk to the IO thread and
 * propagates it to the monitor source. */
945 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
946 pa_sink_assert_ref(s);
947 pa_assert_ctl_context();
951 if (s->monitor_source)
952 pa_source_set_asyncmsgq(s->monitor_source, q);
955 /* Called from main context, and not while the IO thread is active, please */
/* Updates the LATENCY/DYNAMIC_LATENCY flag bits selected by 'mask' to
 * the corresponding bits of 'value'. If anything changed, posts a
 * change event, fires SINK_FLAGS_CHANGED, mirrors the change onto the
 * monitor source, and recurses into any filter sinks attached via
 * origin_sink so the whole chain stays consistent. */
956 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
957 pa_sink_flags_t old_flags;
958 pa_sink_input *input;
961 pa_sink_assert_ref(s);
962 pa_assert_ctl_context();
964 /* For now, allow only a minimal set of flags to be changed. */
965 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
967 old_flags = s->flags;
968 s->flags = (s->flags & ~mask) | (value & mask);
970 if (s->flags == old_flags)
973 if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
974 pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
976 if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
977 pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
978 s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
980 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
981 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
983 if (s->monitor_source)
984 pa_source_update_flags(s->monitor_source,
985 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
986 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
987 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
988 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
990 PA_IDXSET_FOREACH(input, s->inputs, idx) {
991 if (input->origin_sink)
992 pa_sink_update_flags(input->origin_sink, mask, value);
996 /* Called from IO context, or before _put() from main context */
/* Sets the rtpoll object the IO thread runs on and propagates it to
 * the monitor source. */
997 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
998 pa_sink_assert_ref(s);
999 pa_sink_assert_io_context(s);
1001 s->thread_info.rtpoll = p;
1003 if (s->monitor_source)
1004 pa_source_set_rtpoll(s->monitor_source, p);
1007 /* Called from main context */
/* Re-evaluates whether the sink should be RUNNING or IDLE based on
 * whether it currently has users; a SUSPENDED sink is left alone.
 * Returns the result of sink_set_state(). */
1008 int pa_sink_update_status(pa_sink*s) {
1009 pa_sink_assert_ref(s);
1010 pa_assert_ctl_context();
1011 pa_assert(PA_SINK_IS_LINKED(s->state));
1013 if (s->state == PA_SINK_SUSPENDED)
1016 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
1019 /* Called from main context */
/* Adds or removes 'cause' from the sink's suspend-cause bitmask.
 * If causes remain, suspends the sink; otherwise resumes it to
 * RUNNING/IDLE depending on current usage. 'cause' must be non-zero. */
1020 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
1021 pa_suspend_cause_t merged_cause;
1023 pa_sink_assert_ref(s);
1024 pa_assert_ctl_context();
1025 pa_assert(PA_SINK_IS_LINKED(s->state));
1026 pa_assert(cause != 0);
1029 merged_cause = s->suspend_cause | cause;
1031 merged_cause = s->suspend_cause & ~cause;
1034 return sink_set_state(s, PA_SINK_SUSPENDED, merged_cause);
1036 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
1039 /* Called from main context */
/* Starts moving all inputs away from this sink: for each input whose
 * move can be started, takes a reference and pushes it onto queue 'q'
 * (created here if the caller passed NULL, per upstream — that line is
 * missing from this extraction); inputs that refuse to move are
 * unreffed and left in place. Returns the queue for
 * pa_sink_move_all_finish()/_fail(). */
1040 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
1041 pa_sink_input *i, *n;
1044 pa_sink_assert_ref(s);
1045 pa_assert_ctl_context();
1046 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the successor before starting the move, since a started move
 * detaches 'i' from s->inputs. */
1051 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
1052 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
1054 pa_sink_input_ref(i);
1056 if (pa_sink_input_start_move(i) >= 0)
1057 pa_queue_push(q, i);
1059 pa_sink_input_unref(i);
1065 /* Called from main context */
/* Completes moves started by pa_sink_move_all_start(): each queued
 * input that is still linked is attached to sink 's' (falling back to
 * pa_sink_input_fail_move() if the finish fails), then unreffed; the
 * queue itself is freed. 'save' marks the new routing as persistent. */
1066 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
1069 pa_sink_assert_ref(s);
1070 pa_assert_ctl_context();
1071 pa_assert(PA_SINK_IS_LINKED(s->state));
1074 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1075 if (PA_SINK_INPUT_IS_LINKED(i->state)) {
1076 if (pa_sink_input_finish_move(i, s, save) < 0)
1077 pa_sink_input_fail_move(i);
1080 pa_sink_input_unref(i);
1083 pa_queue_free(q, NULL);
1086 /* Called from main context */
/* Abort a move started with pa_sink_move_all_start(): tell every queued input
 * that its move failed, drop the references and free the queue. */
1087 void pa_sink_move_all_fail(pa_queue *q) {
1090 pa_assert_ctl_context();
1093 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1094 pa_sink_input_fail_move(i);
1095 pa_sink_input_unref(i);
1098 pa_queue_free(q, NULL);
1101 /* Called from IO thread context */
/* Scan all inputs (recursing through filter sinks) for underruns and return
 * how much of left_to_play remains after accounting for the longest underrun
 * found, in bytes of this sink's sample spec. */
1102 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
1107 pa_sink_assert_ref(s);
1108 pa_sink_assert_io_context(s);
1110 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1111 size_t uf = i->thread_info.underrun_for_sink;
1113 /* Propagate down the filter tree */
1114 if (i->origin_sink) {
1115 size_t filter_result, left_to_play_origin;
1117 /* The recursive call works in the origin sink domain ... */
1118 left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
1120 /* .. and returns the time to sleep before waking up. We need the
1121 * underrun duration for comparisons, so we undo the subtraction on
1122 * the return value... */
1123 filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
1125 /* ... and convert it back to the master sink domain */
1126 filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
1128 /* Remember the longest underrun so far */
1129 if (filter_result > result)
1130 result = filter_result;
1134 /* No underrun here, move on */
1136 } else if (uf >= left_to_play) {
1137 /* The sink has possibly consumed all the data the sink input provided */
1138 pa_sink_input_process_underrun(i);
1139 } else if (uf > result) {
1140 /* Remember the longest underrun so far */
1146 pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
1147 (long) result, (long) left_to_play - result);
/* Callers get the amount still safely playable before the underrun point. */
1148 return left_to_play - result;
1151 /* Called from IO thread context */
/* Execute a rewind of nbytes: reset the rewind bookkeeping, apply the rewind
 * to deferred volume changes, to every input, and to the monitor source. */
1152 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1156 pa_sink_assert_ref(s);
1157 pa_sink_assert_io_context(s);
1158 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1160 /* If nobody requested this and this is actually no real rewind
1161 * then we can short cut this. Please note that this means that
1162 * not all rewind requests triggered upstream will always be
1163 * translated in actual requests! */
1164 if (!s->thread_info.rewind_requested && nbytes <= 0)
/* Clear the request before notifying anyone, so re-requests during
 * processing are not lost. */
1167 s->thread_info.rewind_nbytes = 0;
1168 s->thread_info.rewind_requested = false;
1171 pa_log_debug("Processing rewind...");
1172 if (s->flags & PA_SINK_DEFERRED_VOLUME)
1173 pa_sink_volume_change_rewind(s, nbytes);
1174 #ifdef TIZEN_PCM_DUMP
/* Step the PCM dump file position back by the rewound amount. */
1177 fseeko(s->pcm_dump_fp, (off_t)nbytes * (-1), SEEK_CUR);
1181 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1182 pa_sink_input_assert_ref(i);
1183 pa_sink_input_process_rewind(i, nbytes);
1187 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1188 pa_source_process_rewind(s->monitor_source, nbytes);
1192 /* Called from IO thread context */
/* Peek up to maxinfo inputs into the info[] array for mixing. On return
 * *length is clamped to the shortest chunk found. Each used entry holds a
 * reference to its input (info->userdata) and its memblock; silent chunks
 * are skipped. Returns the number of entries filled. */
1193 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1197 size_t mixlength = *length;
1199 pa_sink_assert_ref(s);
1200 pa_sink_assert_io_context(s);
1203 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1204 pa_sink_input_assert_ref(i);
1206 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
/* Mix length is the minimum over all peeked chunks. */
1208 if (mixlength == 0 || info->chunk.length < mixlength)
1209 mixlength = info->chunk.length;
/* Pure silence contributes nothing to the mix; drop it early. */
1211 if (pa_memblock_is_silence(info->chunk.memblock)) {
1212 pa_memblock_unref(info->chunk.memblock);
/* Keep a reference to the input so inputs_drop() can match it later. */
1216 info->userdata = pa_sink_input_ref(i);
1218 pa_assert(info->chunk.memblock);
1219 pa_assert(info->chunk.length > 0);
1227 *length = mixlength;
1232 /* Called from IO thread context */
/* After rendering: advance every input by result->length, feed direct outputs
 * and the monitor source, and release the references and memblocks taken by
 * fill_mix_info(). */
1233 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1237 unsigned n_unreffed = 0;
1239 pa_sink_assert_ref(s);
1240 pa_sink_assert_io_context(s);
1242 pa_assert(result->memblock);
1243 pa_assert(result->length > 0);
1245 /* We optimize for the case where the order of the inputs has not changed */
1247 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1249 pa_mix_info* m = NULL;
1251 pa_sink_input_assert_ref(i);
1253 /* Let's try to find the matching entry info the pa_mix_info array */
1254 for (j = 0; j < n; j ++) {
1256 if (info[p].userdata == i) {
1266 /* Drop read data */
1267 pa_sink_input_drop(i, result->length);
1269 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
/* Direct outputs get this input's own (volume-adjusted) data rather
 * than the mixed result. */
1271 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1272 void *ostate = NULL;
1273 pa_source_output *o;
1276 if (m && m->chunk.memblock) {
1278 pa_memblock_ref(c.memblock);
1279 pa_assert(result->length <= c.length);
1280 c.length = result->length;
/* Apply the input's volume before handing the data out. */
1282 pa_memchunk_make_writable(&c, 0);
1283 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
1286 pa_memblock_ref(c.memblock);
1287 pa_assert(result->length <= c.length);
1288 c.length = result->length;
1291 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1292 pa_source_output_assert_ref(o);
1293 pa_assert(o->direct_on_input == i);
1294 pa_source_post_direct(s->monitor_source, o, &c);
1297 pa_memblock_unref(c.memblock);
/* Release the chunk and input reference held by this mix entry. */
1302 if (m->chunk.memblock) {
1303 pa_memblock_unref(m->chunk.memblock);
1304 pa_memchunk_reset(&m->chunk);
1307 pa_sink_input_unref(m->userdata);
1314 /* Now drop references to entries that are included in the
1315 * pa_mix_info array but don't exist anymore */
1317 if (n_unreffed < n) {
1318 for (; n > 0; info++, n--) {
1320 pa_sink_input_unref(info->userdata);
1321 if (info->chunk.memblock)
1322 pa_memblock_unref(info->chunk.memblock);
/* Finally feed the mixed result to the monitor source. */
1326 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1327 pa_source_post(s->monitor_source, result);
1330 /* Called from IO thread context */
/* Render up to length bytes of mixed audio into *result. The caller receives
 * a referenced memchunk; result->length may be shorter than requested.
 * Suspended sinks produce silence; a single input is passed through (with
 * volume applied) without an extra mixing pass. */
1331 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1332 pa_mix_info info[MAX_MIX_CHANNELS];
1334 size_t block_size_max;
1336 pa_sink_assert_ref(s);
1337 pa_sink_assert_io_context(s);
1338 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1339 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1342 pa_assert(!s->thread_info.rewind_requested);
1343 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: hand out (a reference to) the cached silence block. */
1345 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1346 result->memblock = pa_memblock_ref(s->silence.memblock);
1347 result->index = s->silence.index;
1348 result->length = PA_MIN(s->silence.length, length);
/* No length requested: default to one frame-aligned mix buffer. */
1355 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
/* Never render more than one mempool block can hold. */
1357 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1358 if (length > block_size_max)
1359 length = pa_frame_align(block_size_max, &s->sample_spec);
1361 pa_assert(length > 0);
1363 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: nothing to mix, return silence. */
1367 *result = s->silence;
1368 pa_memblock_ref(result->memblock);
1370 if (result->length > length)
1371 result->length = length;
1373 } else if (n == 1) {
/* Single input: reuse its chunk directly instead of mixing. */
1376 *result = info[0].chunk;
1377 pa_memblock_ref(result->memblock);
1379 if (result->length > length)
1380 result->length = length;
/* Combined volume = sink soft volume x input volume. */
1382 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1384 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1385 pa_memblock_unref(result->memblock);
1386 pa_silence_memchunk_get(&s->core->silence_cache,
/* Only touch the samples if the volume is not 0 dB. */
1391 } else if (!pa_cvolume_is_norm(&volume)) {
1392 pa_memchunk_make_writable(result, 0);
1393 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* General case: mix all inputs into a freshly allocated block. */
1397 result->memblock = pa_memblock_new(s->core->mempool, length);
1399 ptr = pa_memblock_acquire(result->memblock);
1400 result->length = pa_mix(info, n,
1403 &s->thread_info.soft_volume,
1404 s->thread_info.soft_muted);
1405 pa_memblock_release(result->memblock);
/* Advance inputs and release mix-info references. */
1410 inputs_drop(s, info, n, result);
1412 #ifdef TIZEN_PCM_DUMP
1413 pa_sink_write_pcm_dump(s, result);
1418 /* Called from IO thread context */
/* Like pa_sink_render(), but renders into the caller-supplied target chunk
 * in place. target->length may be shortened if less data was available. */
1419 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1420 pa_mix_info info[MAX_MIX_CHANNELS];
1422 size_t length, block_size_max;
1424 pa_sink_assert_ref(s);
1425 pa_sink_assert_io_context(s);
1426 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1428 pa_assert(target->memblock);
1429 pa_assert(target->length > 0);
1430 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1432 pa_assert(!s->thread_info.rewind_requested);
1433 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: fill the target with silence. */
1435 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1436 pa_silence_memchunk(target, &s->sample_spec);
1442 length = target->length;
/* Cap the render size at what one mempool block can hold. */
1443 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1444 if (length > block_size_max)
1445 length = pa_frame_align(block_size_max, &s->sample_spec);
1447 pa_assert(length > 0);
1449 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: no data, write silence. */
1452 if (target->length > length)
1453 target->length = length;
1455 pa_silence_memchunk(target, &s->sample_spec);
1456 } else if (n == 1) {
/* Single input: copy its data (volume-adjusted) into the target. */
1459 if (target->length > length)
1460 target->length = length;
1462 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1464 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1465 pa_silence_memchunk(target, &s->sample_spec);
1469 vchunk = info[0].chunk;
1470 pa_memblock_ref(vchunk.memblock);
1472 if (vchunk.length > length)
1473 vchunk.length = length;
/* Apply the combined volume only if it is not 0 dB. */
1475 if (!pa_cvolume_is_norm(&volume)) {
1476 pa_memchunk_make_writable(&vchunk, 0);
1477 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1480 pa_memchunk_memcpy(target, &vchunk);
1481 pa_memblock_unref(vchunk.memblock);
/* General case: mix all inputs directly into the target block. */
1487 ptr = pa_memblock_acquire(target->memblock);
1489 target->length = pa_mix(info, n,
1490 (uint8_t*) ptr + target->index, length,
1492 &s->thread_info.soft_volume,
1493 s->thread_info.soft_muted);
1495 pa_memblock_release(target->memblock);
1498 inputs_drop(s, info, n, target);
1500 #ifdef TIZEN_PCM_DUMP
1501 pa_sink_write_pcm_dump(s, target);
1506 /* Called from IO thread context */
/* Like pa_sink_render_into(), but guarantees the entire target chunk is
 * filled, looping over pa_sink_render_into() as needed. */
1507 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1511 pa_sink_assert_ref(s);
1512 pa_sink_assert_io_context(s);
1513 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1515 pa_assert(target->memblock);
1516 pa_assert(target->length > 0);
1517 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1519 pa_assert(!s->thread_info.rewind_requested);
1520 pa_assert(s->thread_info.rewind_nbytes == 0);
1522 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1523 pa_silence_memchunk(target, &s->sample_spec);
/* Render the not-yet-filled remainder of the target. */
1536 pa_sink_render_into(s, &chunk);
1545 /* Called from IO thread context */
/* Render exactly length bytes into *result, padding with additional render
 * passes if the first pa_sink_render() returned less than requested. */
1546 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1547 pa_sink_assert_ref(s);
1548 pa_sink_assert_io_context(s);
1549 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1550 pa_assert(length > 0);
1551 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1554 pa_assert(!s->thread_info.rewind_requested);
1555 pa_assert(s->thread_info.rewind_nbytes == 0);
1559 pa_sink_render(s, length, result);
/* Short read: grow the chunk and render the remaining tail in place. */
1561 if (result->length < length) {
1564 pa_memchunk_make_writable(result, length);
1566 chunk.memblock = result->memblock;
1567 chunk.index = result->index + result->length;
1568 chunk.length = length - result->length;
1570 pa_sink_render_into_full(s, &chunk);
1572 result->length = length;
1578 /* Called from main thread */
/* Try to switch the sink to a new sample spec (rate/format), picking between
 * the default and alternate rates to minimize resampling, honoring
 * passthrough and avoid-resampling modes. No-op when the sink has no
 * reconfigure() callback or is in a state that forbids the change. */
1579 void pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
1580 pa_sample_spec desired_spec;
1581 uint32_t default_rate = s->default_sample_rate;
1582 uint32_t alternate_rate = s->alternate_sample_rate;
1585 bool default_rate_is_usable = false;
1586 bool alternate_rate_is_usable = false;
1587 bool avoid_resampling = s->avoid_resampling;
/* Already running the requested spec: nothing to do. */
1589 if (pa_sample_spec_equal(spec, &s->sample_spec))
1592 if (!s->reconfigure)
1596 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1597 pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
/* A running sink (or monitor) cannot change its spec mid-stream. */
1602 if (PA_SINK_IS_RUNNING(s->state)) {
1603 pa_log_info("Cannot update sample spec, SINK_IS_RUNNING, will keep using %s and %u Hz",
1604 pa_sample_format_to_string(s->sample_spec.format), s->sample_spec.rate);
1608 if (s->monitor_source) {
1609 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1610 pa_log_info("Cannot update sample spec, monitor source is RUNNING");
1615 if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
1618 desired_spec = s->sample_spec;
1621 if (!avoid_resampling) {
/* User-selected rate/format overrides everything else. */
1622 default_rate = alternate_rate = s->selected_sample_rate;
1623 desired_spec.format = s->selected_sample_format;
1627 /* We have to try to use the sink input format and rate */
1628 desired_spec.format = spec->format;
1629 desired_spec.rate = spec->rate;
1631 } else if (avoid_resampling) {
1632 /* We just try to set the sink input's sample rate if it's not too low */
1633 if (spec->rate >= default_rate || spec->rate >= alternate_rate)
1634 desired_spec.rate = spec->rate;
1635 desired_spec.format = spec->format;
1637 } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
1638 /* We can directly try to use this rate */
1639 desired_spec.rate = spec->rate;
1643 if (desired_spec.rate != spec->rate) {
1644 /* See if we can pick a rate that results in less resampling effort */
/* 11025- and 4000-divisible rate families resample cheaply within family. */
1645 if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
1646 default_rate_is_usable = true;
1647 if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
1648 default_rate_is_usable = true;
1649 if (alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
1650 alternate_rate_is_usable = true;
1651 if (alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
1652 alternate_rate_is_usable = true;
1654 if (alternate_rate_is_usable && !default_rate_is_usable)
1655 desired_spec.rate = alternate_rate;
1657 desired_spec.rate = default_rate;
1660 if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_sink_is_passthrough(s))
1663 pa_log_info("desired spec is same as sink->sample_spec");
/* Suspend around the reconfiguration when live streams are attached. */
1670 if (!passthrough && pa_sink_used_by(s) > 0)
1673 pa_log_debug("Suspending sink %s due to changing format, desired format = %s rate = %u",
1674 s->name, pa_sample_format_to_string(desired_spec.format), desired_spec.rate);
1675 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1677 s->reconfigure(s, &desired_spec, passthrough);
1679 /* update monitor source as well */
1680 if (s->monitor_source && !passthrough)
1681 pa_source_reconfigure(s->monitor_source, &s->sample_spec, false);
1682 pa_log_info("Reconfigured successfully");
/* Corked inputs won't be moved away, so refresh their resamplers here. */
1684 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1685 if (i->state == PA_SINK_INPUT_CORKED)
1686 pa_sink_input_update_resampler(i);
1689 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1692 /* Called from main thread */
/* Query the sink's current latency (synchronously, via the IO thread),
 * adjusted by the port latency offset. Returns a value in usec. */
1693 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1696 pa_sink_assert_ref(s);
1697 pa_assert_ctl_context();
1698 pa_assert(PA_SINK_IS_LINKED(s->state));
1700 /* The returned value is supposed to be in the time domain of the sound card! */
1702 if (s->state == PA_SINK_SUSPENDED)
1705 if (!(s->flags & PA_SINK_LATENCY))
/* Round-trip to the IO thread for an up-to-date measurement. */
1708 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1710 /* the return value is unsigned, so check that the offset can be added to usec without
1712 if (-s->port_latency_offset <= usec)
1713 usec += s->port_latency_offset;
1717 return (pa_usec_t)usec;
1720 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(). May return a negative value
 * (offset can exceed the raw latency) when allow_negative is true; otherwise
 * negative results are clamped. */
1721 int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
1725 pa_sink_assert_ref(s);
1726 pa_sink_assert_io_context(s);
1727 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1729 /* The returned value is supposed to be in the time domain of the sound card! */
1731 if (s->thread_info.state == PA_SINK_SUSPENDED)
1734 if (!(s->flags & PA_SINK_LATENCY))
1737 o = PA_MSGOBJECT(s);
1739 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
/* Same-thread call: invoke the message handler directly, no queue round-trip. */
1741 o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1743 /* If allow_negative is false, the call should only return positive values, */
1744 usec += s->thread_info.port_latency_offset;
1745 if (!allow_negative && usec < 0)
1751 /* Called from the main thread (and also from the IO thread while the main
1752 * thread is waiting).
1754 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1755 * set. Instead, flat volume mode is detected by checking whether the root sink
1756 * has the flag set. */
1757 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1758 pa_sink_assert_ref(s);
/* Walk up to the root of the volume-sharing chain and check its flag. */
1760 s = pa_sink_get_master(s);
1763 return (s->flags & PA_SINK_FLAT_VOLUME);
1768 /* Called from the main thread (and also from the IO thread while the main
1769 * thread is waiting). */
/* Follow the input_to_master chain of volume-sharing filter sinks up to the
 * root sink. May return NULL if a link in the chain is missing. */
1770 pa_sink *pa_sink_get_master(pa_sink *s) {
1771 pa_sink_assert_ref(s);
1773 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* A sharing sink without a master link is in a transient/broken state. */
1774 if (PA_UNLIKELY(!s->input_to_master))
1777 s = s->input_to_master->sink;
1783 /* Called from main context */
/* A sink is a filter sink iff it is connected to a master via a sink input. */
1784 bool pa_sink_is_filter(pa_sink *s) {
1785 pa_sink_assert_ref(s);
1787 return (s->input_to_master != NULL);
1790 /* Called from main context */
/* True when the sink is currently in passthrough mode, i.e. its single
 * connected input is a passthrough stream. */
1791 bool pa_sink_is_passthrough(pa_sink *s) {
1792 pa_sink_input *alt_i;
1795 pa_sink_assert_ref(s);
1797 /* one and only one PASSTHROUGH input can possibly be connected */
1798 if (pa_idxset_size(s->inputs) == 1) {
1799 alt_i = pa_idxset_first(s->inputs, &idx);
1801 if (pa_sink_input_is_passthrough(alt_i))
1808 /* Called from main context */
1809 void pa_sink_enter_passthrough(pa_sink *s) {
1812 /* The sink implementation is reconfigured for passthrough in
1813 * pa_sink_reconfigure(). This function sets the PA core objects to
1814 * passthrough mode. */
1816 /* disable the monitor in passthrough mode */
1817 if (s->monitor_source) {
1818 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1819 pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1822 /* set the volume to NORM */
/* Save the current volume so pa_sink_leave_passthrough() can restore it. */
1823 s->saved_volume = *pa_sink_get_volume(s, true);
1824 s->saved_save_volume = s->save_volume;
/* Cap at base_volume so hardware amplification is not engaged. */
1826 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1827 pa_sink_set_volume(s, &volume, true, false);
1829 pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
1832 /* Called from main context */
/* Undo pa_sink_enter_passthrough(): resume the monitor and restore the
 * volume that was saved when passthrough mode was entered. */
1833 void pa_sink_leave_passthrough(pa_sink *s) {
1834 /* Unsuspend monitor */
1835 if (s->monitor_source) {
1836 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1837 pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1840 /* Restore sink volume to what it was before we entered passthrough mode */
1841 pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
/* Clear the saved state so it is not accidentally reused. */
1843 pa_cvolume_init(&s->saved_volume);
1844 s->saved_save_volume = false;
1848 /* Called from main context. */
1849 static void compute_reference_ratio(pa_sink_input *i) {
1851 pa_cvolume remapped;
1855 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1858 * Calculates the reference ratio from the sink's reference
1859 * volume. This basically calculates:
1861 * i->reference_ratio = i->volume / i->sink->reference_volume
1864 remapped = i->sink->reference_volume;
/* Convert the sink volume into the input's channel map before dividing. */
1865 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1867 ratio = i->reference_ratio;
1869 for (c = 0; c < i->sample_spec.channels; c++) {
1871 /* We don't update when the sink volume is 0 anyway */
1872 if (remapped.values[c] <= PA_VOLUME_MUTED)
1875 /* Don't update the reference ratio unless necessary */
/* If ratio * sink_volume already reproduces the input volume exactly,
 * keep the old ratio to avoid rounding drift. */
1876 if (pa_sw_volume_multiply(
1878 remapped.values[c]) == i->volume.values[c])
1881 ratio.values[c] = pa_sw_volume_divide(
1882 i->volume.values[c],
1883 remapped.values[c]);
1886 pa_sink_input_set_reference_ratio(i, &ratio);
1889 /* Called from main context. Only called for the root sink in volume sharing
1890 * cases, except for internal recursive calls. */
/* Recompute the reference ratio of every input of s, recursing into linked
 * volume-sharing filter sinks. */
1891 static void compute_reference_ratios(pa_sink *s) {
1895 pa_sink_assert_ref(s);
1896 pa_assert_ctl_context();
1897 pa_assert(PA_SINK_IS_LINKED(s->state));
1898 pa_assert(pa_sink_flat_volume_enabled(s));
1900 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1901 compute_reference_ratio(i);
/* Descend into filter sinks that share their volume with us. */
1903 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1904 && PA_SINK_IS_LINKED(i->origin_sink->state))
1905 compute_reference_ratios(i->origin_sink);
1909 /* Called from main context. Only called for the root sink in volume sharing
1910 * cases, except for internal recursive calls. */
/* Recompute each input's real ratio and soft volume against the sink's real
 * volume, recursing into volume-sharing filter sinks. */
1911 static void compute_real_ratios(pa_sink *s) {
1915 pa_sink_assert_ref(s);
1916 pa_assert_ctl_context();
1917 pa_assert(PA_SINK_IS_LINKED(s->state));
1918 pa_assert(pa_sink_flat_volume_enabled(s));
1920 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1922 pa_cvolume remapped;
1924 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1925 /* The origin sink uses volume sharing, so this input's real ratio
1926 * is handled as a special case - the real ratio must be 0 dB, and
1927 * as a result i->soft_volume must equal i->volume_factor. */
1928 pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1929 i->soft_volume = i->volume_factor;
1931 if (PA_SINK_IS_LINKED(i->origin_sink->state))
1932 compute_real_ratios(i->origin_sink);
1938 * This basically calculates:
1940 * i->real_ratio := i->volume / s->real_volume
1941 * i->soft_volume := i->real_ratio * i->volume_factor
/* Bring the sink's real volume into the input's channel map first. */
1944 remapped = s->real_volume;
1945 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1947 i->real_ratio.channels = i->sample_spec.channels;
1948 i->soft_volume.channels = i->sample_spec.channels;
1950 for (c = 0; c < i->sample_spec.channels; c++) {
1952 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1953 /* We leave i->real_ratio untouched */
1954 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1958 /* Don't lose accuracy unless necessary */
/* Only recompute the ratio when the stored one no longer reproduces
 * the input volume exactly. */
1959 if (pa_sw_volume_multiply(
1960 i->real_ratio.values[c],
1961 remapped.values[c]) != i->volume.values[c])
1963 i->real_ratio.values[c] = pa_sw_volume_divide(
1964 i->volume.values[c],
1965 remapped.values[c]);
1967 i->soft_volume.values[c] = pa_sw_volume_multiply(
1968 i->real_ratio.values[c],
1969 i->volume_factor.values[c]);
1972 /* We don't copy the soft_volume to the thread_info data
1973 * here. That must be done by the caller */
/* Remap *v from channel map 'from' to 'to' while minimizing cross-stream
 * impact; see the explanatory comment below. Returns v. */
1977 static pa_cvolume *cvolume_remap_minimal_impact(
1979 const pa_cvolume *template,
1980 const pa_channel_map *from,
1981 const pa_channel_map *to) {
1986 pa_assert(template);
1989 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1990 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1992 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1993 * mapping from sink input to sink volumes:
1995 * If template is a possible remapping from v it is used instead
1996 * of remapping anew.
1998 * If the channel maps don't match we set an all-channel volume on
1999 * the sink to ensure that changing a volume on one stream has no
2000 * effect that cannot be compensated for in another stream that
2001 * does not have the same channel map as the sink. */
2003 if (pa_channel_map_equal(from, to))
/* Template round-trips back to v: reuse the template as-is. */
2007 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
/* Fallback: flatten to a uniform volume (the per-channel maximum). */
2012 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
2016 /* Called from main thread. Only called for the root sink in volume sharing
2017 * cases, except for internal recursive calls. */
/* Merge the per-channel maximum of all input volumes (recursing through
 * volume-sharing filter sinks) into *max_volume, expressed in channel_map. */
2018 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
2022 pa_sink_assert_ref(s);
2023 pa_assert(max_volume);
2024 pa_assert(channel_map);
2025 pa_assert(pa_sink_flat_volume_enabled(s));
2027 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2028 pa_cvolume remapped;
2030 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2031 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2032 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
2034 /* Ignore this input. The origin sink uses volume sharing, so this
2035 * input's volume will be set to be equal to the root sink's real
2036 * volume. Obviously this input's current volume must not then
2037 * affect what the root sink's real volume will be. */
2041 remapped = i->volume;
2042 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
2043 pa_cvolume_merge(max_volume, max_volume, &remapped);
2047 /* Called from main thread. Only called for the root sink in volume sharing
2048 * cases, except for internal recursive calls. */
/* True if the sink tree rooted at s has any "real" input, i.e. one that is
 * not merely the connection to a volume-sharing filter sink with no inputs
 * of its own. */
2049 static bool has_inputs(pa_sink *s) {
2053 pa_sink_assert_ref(s);
2055 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2056 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
2063 /* Called from main thread. Only called for the root sink in volume sharing
2064 * cases, except for internal recursive calls. */
/* Set s->real_volume to new_volume (given in channel_map) and propagate it
 * down through volume-sharing filter sinks, updating their inputs' volumes
 * and reference ratios when flat volume is in effect. */
2065 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
2069 pa_sink_assert_ref(s);
2070 pa_assert(new_volume);
2071 pa_assert(channel_map);
2073 s->real_volume = *new_volume;
2074 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
2076 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2077 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2078 if (pa_sink_flat_volume_enabled(s)) {
2079 pa_cvolume new_input_volume;
2081 /* Follow the root sink's real volume. */
2082 new_input_volume = *new_volume;
2083 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
2084 pa_sink_input_set_volume_direct(i, &new_input_volume);
2085 compute_reference_ratio(i);
/* Recurse so the filter sink mirrors the root's real volume too. */
2088 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2089 update_real_volume(i->origin_sink, new_volume, channel_map);
2094 /* Called from main thread. Only called for the root sink in shared volume
2096 static void compute_real_volume(pa_sink *s) {
2097 pa_sink_assert_ref(s);
2098 pa_assert_ctl_context();
2099 pa_assert(PA_SINK_IS_LINKED(s->state));
2100 pa_assert(pa_sink_flat_volume_enabled(s));
2101 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2103 /* This determines the maximum volume of all streams and sets
2104 * s->real_volume accordingly. */
2106 if (!has_inputs(s)) {
2107 /* In the special case that we have no sink inputs we leave the
2108 * volume unmodified. */
2109 update_real_volume(s, &s->reference_volume, &s->channel_map);
/* Start from silence and grow to the loudest input volume. */
2113 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
2115 /* First let's determine the new maximum volume of all inputs
2116 * connected to this sink */
2117 get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
2118 update_real_volume(s, &s->real_volume, &s->channel_map);
2120 /* Then, let's update the real ratios/soft volumes of all inputs
2121 * connected to this sink */
2122 compute_real_ratios(s);
2125 /* Called from main thread. Only called for the root sink in shared volume
2126 * cases, except for internal recursive calls. */
2127 static void propagate_reference_volume(pa_sink *s) {
2131 pa_sink_assert_ref(s);
2132 pa_assert_ctl_context();
2133 pa_assert(PA_SINK_IS_LINKED(s->state));
2134 pa_assert(pa_sink_flat_volume_enabled(s));
2136 /* This is called whenever the sink volume changes that is not
2137 * caused by a sink input volume change. We need to fix up the
2138 * sink input volumes accordingly */
2140 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2141 pa_cvolume new_volume;
2143 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2144 if (PA_SINK_IS_LINKED(i->origin_sink->state))
2145 propagate_reference_volume(i->origin_sink);
2147 /* Since the origin sink uses volume sharing, this input's volume
2148 * needs to be updated to match the root sink's real volume, but
2149 * that will be done later in update_real_volume(). */
2153 /* This basically calculates:
2155 * i->volume := s->reference_volume * i->reference_ratio */
/* Remap the sink volume into the input's channel map before scaling. */
2157 new_volume = s->reference_volume;
2158 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2159 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2160 pa_sink_input_set_volume_direct(i, &new_volume);
2164 /* Called from main thread. Only called for the root sink in volume sharing
2165 * cases, except for internal recursive calls. The return value indicates
2166 * whether any reference volume actually changed. */
2167 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
2169 bool reference_volume_changed;
2173 pa_sink_assert_ref(s);
2174 pa_assert(PA_SINK_IS_LINKED(s->state));
2176 pa_assert(channel_map);
2177 pa_assert(pa_cvolume_valid(v));
/* Convert the incoming volume into this sink's channel map. */
2180 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
2182 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
2183 pa_sink_set_reference_volume_direct(s, &volume);
/* Keep an earlier save request sticky unless the volume actually changed. */
2185 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
2187 if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2188 /* If the root sink's volume doesn't change, then there can't be any
2189 * changes in the other sinks in the sink tree either.
2191 * It's probably theoretically possible that even if the root sink's
2192 * volume changes slightly, some filter sink doesn't change its volume
2193 * due to rounding errors. If that happens, we still want to propagate
2194 * the changed root sink volume to the sinks connected to the
2195 * intermediate sink that didn't change its volume. This theoretical
2196 * possibility is the reason why we have that !(s->flags &
2197 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2198 * notice even if we returned here false always if
2199 * reference_volume_changed is false. */
/* Push the same volume down into linked volume-sharing filter sinks,
 * never marking their copies for saving. */
2202 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2203 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2204 && PA_SINK_IS_LINKED(i->origin_sink->state))
2205 update_reference_volume(i->origin_sink, v, channel_map, false);
2211 /* Called from main thread */
/* Set the sink's volume. With volume == NULL (flat volume only) the sink's
 * reference/real volumes are re-synchronized from the stream volumes
 * instead. Changes are applied at the root of the volume-sharing tree and
 * propagated from there. */
2212 void pa_sink_set_volume(
2214 const pa_cvolume *volume,
2218 pa_cvolume new_reference_volume;
2221 pa_sink_assert_ref(s);
2222 pa_assert_ctl_context();
2223 pa_assert(PA_SINK_IS_LINKED(s->state));
2224 pa_assert(!volume || pa_cvolume_valid(volume));
2225 pa_assert(volume || pa_sink_flat_volume_enabled(s));
2226 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2228 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2229 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2230 if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2231 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
2235 /* In case of volume sharing, the volume is set for the root sink first,
2236 * from which it's then propagated to the sharing sinks. */
2237 root_sink = pa_sink_get_master(s);
2239 if (PA_UNLIKELY(!root_sink))
2242 /* As a special exception we accept mono volumes on all sinks --
2243 * even on those with more complex channel maps */
2246 if (pa_cvolume_compatible(volume, &s->sample_spec))
2247 new_reference_volume = *volume;
/* Mono volume: scale the existing per-channel balance to the new level. */
2249 new_reference_volume = s->reference_volume;
2250 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2253 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2255 if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2256 if (pa_sink_flat_volume_enabled(root_sink)) {
2257 /* OK, propagate this volume change back to the inputs */
2258 propagate_reference_volume(root_sink);
2260 /* And now recalculate the real volume */
2261 compute_real_volume(root_sink);
/* Non-flat: real volume simply follows the reference volume. */
2263 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2267 /* If volume is NULL we synchronize the sink's real and
2268 * reference volumes with the stream volumes. */
2270 pa_assert(pa_sink_flat_volume_enabled(root_sink));
2272 /* Ok, let's determine the new real volume */
2273 compute_real_volume(root_sink);
2275 /* Let's 'push' the reference volume if necessary */
2276 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2277 /* If the sink and its root don't have the same number of channels, we need to remap */
2278 if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2279 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2280 update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2282 /* Now that the reference volume is updated, we can update the streams'
2283 * reference ratios. */
2284 compute_reference_ratios(root_sink);
2287 if (root_sink->set_volume) {
2288 /* If we have a function set_volume(), then we do not apply a
2289 * soft volume by default. However, set_volume() is free to
2290 * apply one to root_sink->soft_volume */
2292 pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
/* With deferred volume the callback runs later in the IO thread. */
2293 if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2294 root_sink->set_volume(root_sink);
2297 /* If we have no function set_volume(), then the soft volume
2298 * becomes the real volume */
2299 root_sink->soft_volume = root_sink->real_volume;
2301 /* This tells the sink that soft volume and/or real volume changed */
2303 pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2306 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2307 * Only to be called by sink implementor */
/* Set the software (post-hardware) volume directly. volume == NULL resets it
 * to norm. Must not be used on sinks that share volume with a master.
 * Unless the sink uses deferred volume, the new value is pushed to the IO
 * thread via PA_SINK_MESSAGE_SET_VOLUME; otherwise thread_info is updated
 * in place (we are already in the IO context then). */
2308 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2310 pa_sink_assert_ref(s);
2311 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2313 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2314 pa_sink_assert_io_context(s);
2316 pa_assert_ctl_context();
/* NULL means "reset to 0 dB" -- presumably an else pairs these two
 * branches; the intervening lines are elided in this excerpt. */
2319 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2321 s->soft_volume = *volume;
2323 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2324 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2326 s->thread_info.soft_volume = s->soft_volume;
2329 /* Called from the main thread. Only called for the root sink in volume sharing
2330 * cases, except for internal recursive calls. */
/* React to an externally caused hardware volume change: adopt the new real
 * volume as the reference volume and rebuild the per-stream volumes from the
 * (unchanged) reference ratios. Recurses into volume-sharing origin sinks. */
2331 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2335 pa_sink_assert_ref(s);
2336 pa_assert(old_real_volume);
2337 pa_assert_ctl_context();
2338 pa_assert(PA_SINK_IS_LINKED(s->state));
2340 /* This is called when the hardware's real volume changes due to
2341 * some external event. We copy the real volume into our
2342 * reference volume and then rebuild the stream volumes based on
2343 * i->real_ratio which should stay fixed. */
2345 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* Nothing to do if the hardware volume did not actually change. */
2346 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2349 /* 1. Make the real volume the reference volume */
2350 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2353 if (pa_sink_flat_volume_enabled(s)) {
2355 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2356 pa_cvolume new_volume;
2358 /* 2. Since the sink's reference and real volumes are equal
2359 * now our ratios should be too. */
2360 pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2362 /* 3. Recalculate the new stream reference volume based on the
2363 * reference ratio and the sink's reference volume.
2365 * This basically calculates:
2367 * i->volume = s->reference_volume * i->reference_ratio
2369 * This is identical to propagate_reference_volume() */
2370 new_volume = s->reference_volume;
2371 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2372 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2373 pa_sink_input_set_volume_direct(i, &new_volume);
/* Recurse into filter sinks that share our volume. */
2375 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2376 && PA_SINK_IS_LINKED(i->origin_sink->state))
2377 propagate_real_volume(i->origin_sink, old_real_volume);
2381 /* Something got changed in the hardware. It probably makes sense
2382 * to save changed hw settings given that hw volume changes not
2383 * triggered by PA are almost certainly done by the user. */
2384 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2385 s->save_volume = true;
2388 /* Called from io thread */
/* Ask the main thread to re-read hardware volume and mute state; the request
 * is posted asynchronously and handled in pa_sink_process_msg()
 * (PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE) on the main thread. */
2389 void pa_sink_update_volume_and_mute(pa_sink *s) {
2391 pa_sink_assert_io_context(s);
2393 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2396 /* Called from main thread */
/* Return the sink's reference volume, optionally refreshing it from the
 * hardware first (when force_refresh is set or the sink requests periodic
 * refreshing). A refreshed real volume is propagated to the streams via
 * propagate_real_volume(). */
2397 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
2398 pa_sink_assert_ref(s);
2399 pa_assert_ctl_context();
2400 pa_assert(PA_SINK_IS_LINKED(s->state));
2402 if (s->refresh_volume || force_refresh) {
2403 struct pa_cvolume old_real_volume;
2405 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2407 old_real_volume = s->real_volume;
/* Without deferred volume, query the driver directly; otherwise the
 * IO thread is asked via PA_SINK_MESSAGE_GET_VOLUME. */
2409 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2412 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2414 update_real_volume(s, &s->real_volume, &s->channel_map);
2415 propagate_real_volume(s, &old_real_volume);
2418 return &s->reference_volume;
2421 /* Called from main thread. In volume sharing cases, only the root sink may
/* Notification hook for sink implementors: the hardware volume changed behind
 * our back. Records the new real volume and propagates it to the streams. */
2423 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2424 pa_cvolume old_real_volume;
2426 pa_sink_assert_ref(s);
2427 pa_assert_ctl_context();
2428 pa_assert(PA_SINK_IS_LINKED(s->state));
2429 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2431 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2433 old_real_volume = s->real_volume;
2434 update_real_volume(s, new_real_volume, &s->channel_map);
2435 propagate_real_volume(s, &old_real_volume);
2438 /* Called from main thread */
/* Set the sink's mute state. 'save' marks whether the state should be
 * persisted. A no-op change only ORs in the save flag. For non-deferred
 * volume the driver set_mute() callback is invoked directly (guarded by
 * set_mute_in_progress so pa_sink_mute_changed() can ignore re-entrant
 * notifications); subscribers and hooks are fired afterwards. */
2439 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2442 pa_sink_assert_ref(s);
2443 pa_assert_ctl_context();
2445 old_muted = s->muted;
2447 if (mute == old_muted) {
2448 s->save_muted |= save;
2453 s->save_muted = save;
2455 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
2456 s->set_mute_in_progress = true;
2458 s->set_mute_in_progress = false;
2461 if (!PA_SINK_IS_LINKED(s->state))
2464 pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
2465 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2466 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2467 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2470 /* Called from main thread */
/* Return the sink's mute state, optionally re-reading it from the driver.
 * With deferred volume the query goes through the IO thread
 * (PA_SINK_MESSAGE_GET_MUTE); otherwise get_mute() is called directly.
 * Either way pa_sink_mute_changed() absorbs an unexpected value. */
2471 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2473 pa_sink_assert_ref(s);
2474 pa_assert_ctl_context();
2475 pa_assert(PA_SINK_IS_LINKED(s->state));
2477 if ((s->refresh_muted || force_refresh) && s->get_mute) {
2480 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2481 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2482 pa_sink_mute_changed(s, mute);
2484 if (s->get_mute(s, &mute) >= 0)
2485 pa_sink_mute_changed(s, mute);
2492 /* Called from main thread */
/* Notification hook for sink implementors: the hardware mute state changed
 * behind our back. Ignored while we ourselves are inside set_mute(), and
 * forwarded to pa_sink_set_mute(..., save=true) otherwise. */
2493 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2494 pa_sink_assert_ref(s);
2495 pa_assert_ctl_context();
2496 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Re-entrant notification triggered by our own set_mute() call -- ignore. */
2498 if (s->set_mute_in_progress)
2501 /* pa_sink_set_mute() does this same check, so this may appear redundant,
2502 * but we must have this here also, because the save parameter of
2503 * pa_sink_set_mute() would otherwise have unintended side effects (saving
2504 * the mute state when it shouldn't be saved). */
2505 if (new_muted == s->muted)
2508 pa_sink_set_mute(s, new_muted, true);
2511 /* Called from main thread */
/* Merge/replace entries of the sink's property list according to 'mode' and,
 * if the sink is linked, notify hooks and subscribers.
 * NOTE(review): two signatures are visible below (void and bool return) --
 * presumably a preprocessor conditional (the __TIZEN__ guard seen elsewhere
 * in this file) selects one of them; the directive lines are elided here. */
2513 void pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2515 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2517 pa_sink_assert_ref(s);
2518 pa_assert_ctl_context();
2521 pa_proplist_update(s->proplist, mode, p);
2523 if (PA_SINK_IS_LINKED(s->state)) {
2524 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2525 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2534 /* Called from main thread */
/* Tizen extension: publish the remote-access permission flag in the sink's
 * property list via a temporary proplist (always freed before returning). */
2535 void pa_sink_update_proplist_remote_access_permission(pa_sink *s, bool allowed) {
2536 pa_proplist* p = NULL;
2538 pa_sink_assert_ref(s);
2539 pa_assert_ctl_context();
2541 p = pa_proplist_new();
2543 if (pa_proplist_set_remote_access_permission(p, allowed) == 0)
2544 pa_sink_update_proplist(s, PA_UPDATE_REPLACE, p);
2546 pa_log_error("set remote access permission %d on proplist %p failed", allowed, p);
2548 pa_proplist_free(p);
2550 #endif /* __TIZEN__ */
2552 /* Called from main thread */
2553 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Set (or clear, with description == NULL) PA_PROP_DEVICE_DESCRIPTION,
 * keep the monitor source's description in sync, and notify subscribers
 * when the sink is linked. No-op when the description is unchanged. */
2554 void pa_sink_set_description(pa_sink *s, const char *description) {
2556 pa_sink_assert_ref(s);
2557 pa_assert_ctl_context();
2559 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2562 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2564 if (old && description && pa_streq(old, description))
2568 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2570 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
/* Mirror the change onto our monitor source's description. */
2572 if (s->monitor_source) {
2575 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2576 pa_source_set_description(s->monitor_source, n);
2580 if (PA_SINK_IS_LINKED(s->state)) {
2581 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2582 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2586 /* Called from main thread */
/* Number of entities linked to this sink: all sink inputs (corked or not)
 * plus everything connected to the monitor source. */
2587 unsigned pa_sink_linked_by(pa_sink *s) {
2590 pa_sink_assert_ref(s);
2591 pa_assert_ctl_context();
2592 pa_assert(PA_SINK_IS_LINKED(s->state));
2594 ret = pa_idxset_size(s->inputs);
2596 /* We add in the number of streams connected to us here. Please
2597 * note the asymmetry to pa_sink_used_by()! */
2599 if (s->monitor_source)
2600 ret += pa_source_linked_by(s->monitor_source);
2605 /* Called from main thread */
/* Number of actively playing streams: sink inputs minus the corked ones.
 * Unlike pa_sink_linked_by(), monitor-source streams are NOT counted. */
2606 unsigned pa_sink_used_by(pa_sink *s) {
2609 pa_sink_assert_ref(s);
2610 pa_assert_ctl_context();
2611 pa_assert(PA_SINK_IS_LINKED(s->state));
2613 ret = pa_idxset_size(s->inputs);
2614 pa_assert(ret >= s->n_corked);
2616 /* Streams connected to our monitor source do not matter for
2617 * pa_sink_used_by()!.*/
2619 return ret - s->n_corked;
2622 /* Called from main thread */
/* Count the streams that should keep this sink from auto-suspending:
 * linked, uncorked sink inputs without DONT_INHIBIT_AUTO_SUSPEND, plus the
 * monitor source's own count. 'ignore_input'/'ignore_output' let the caller
 * exclude a stream that is about to go away. Returns 0 when unlinked. */
2623 unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
2628 pa_sink_assert_ref(s);
2629 pa_assert_ctl_context();
2631 if (!PA_SINK_IS_LINKED(s->state))
2636 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2637 if (i == ignore_input)
2640 /* We do not assert here. It is perfectly valid for a sink input to
2641 * be in the INIT state (i.e. created, marked done but not yet put)
2642 * and we should not care if it's unlinked as it won't contribute
2643 * towards our busy status.
2645 if (!PA_SINK_INPUT_IS_LINKED(i->state))
2648 if (i->state == PA_SINK_INPUT_CORKED)
2651 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2657 if (s->monitor_source)
2658 ret += pa_source_check_suspend(s->monitor_source, ignore_output);
/* Map a pa_sink_state_t value to a human-readable string (for logging).
 * Aborts on values outside the enum. */
2663 const char *pa_sink_state_to_string(pa_sink_state_t state) {
2665 case PA_SINK_INIT: return "INIT";
2666 case PA_SINK_IDLE: return "IDLE";
2667 case PA_SINK_RUNNING: return "RUNNING";
2668 case PA_SINK_SUSPENDED: return "SUSPENDED";
2669 case PA_SINK_UNLINKED: return "UNLINKED";
2670 case PA_SINK_INVALID_STATE: return "INVALID_STATE";
2673 pa_assert_not_reached();
2676 /* Called from the IO thread */
/* Copy each sink input's control-thread soft volume into its thread_info
 * shadow and request a rewind so the new volume takes effect immediately.
 * Inputs whose volume is already in sync are skipped. */
2677 static void sync_input_volumes_within_thread(pa_sink *s) {
2681 pa_sink_assert_ref(s);
2682 pa_sink_assert_io_context(s);
2684 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2685 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2688 i->thread_info.soft_volume = i->soft_volume;
2689 pa_sink_input_request_rewind(i, 0, true, false, false);
2693 /* Called from the IO thread. Only called for the root sink in volume sharing
2694 * cases, except for internal recursive calls. */
/* Apply the volume synchronously on this sink (via the SET_VOLUME_SYNCED
 * message handler) and recurse into every volume-sharing origin sink. */
2695 static void set_shared_volume_within_thread(pa_sink *s) {
2696 pa_sink_input *i = NULL;
2699 pa_sink_assert_ref(s);
2701 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2703 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2704 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2705 set_shared_volume_within_thread(i->origin_sink);
2709 /* Called from IO thread, except when it is not */
/* Central message dispatcher for the sink's asyncmsgq. Handles stream
 * attach/detach and moves, volume/mute synchronization between the control
 * and IO threads, suspend state changes, and latency / rewind parameter
 * queries. Note the deliberate fall-throughs in the volume message chain
 * (SET_VOLUME_SYNCED -> SET_VOLUME -> SYNC_VOLUMES). */
2710 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2711 pa_sink *s = PA_SINK(o);
2712 pa_sink_assert_ref(s);
2714 switch ((pa_sink_message_t) code) {
2716 case PA_SINK_MESSAGE_ADD_INPUT: {
2717 pa_sink_input *i = PA_SINK_INPUT(userdata);
2719 /* If you change anything here, make sure to change the
2720 * sink input handling a few lines down at
2721 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2723 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2725 /* Since the caller sleeps in pa_sink_input_put(), we can
2726 * safely access data outside of thread_info even though
2729 if ((i->thread_info.sync_prev = i->sync_prev)) {
2730 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2731 pa_assert(i->sync_prev->sync_next == i);
2732 i->thread_info.sync_prev->thread_info.sync_next = i;
2735 if ((i->thread_info.sync_next = i->sync_next)) {
2736 pa_assert(i->sink == i->thread_info.sync_next->sink);
2737 pa_assert(i->sync_next->sync_prev == i);
2738 i->thread_info.sync_next->thread_info.sync_prev = i;
2741 pa_sink_input_attach(i);
2743 pa_sink_input_set_state_within_thread(i, i->state);
2745 /* The requested latency of the sink input needs to be fixed up and
2746 * then configured on the sink. If this causes the sink latency to
2747 * go down, the sink implementor is responsible for doing a rewind
2748 * in the update_requested_latency() callback to ensure that the
2749 * sink buffer doesn't contain more data than what the new latency
2752 * XXX: Does it really make sense to push this responsibility to
2753 * the sink implementors? Wouldn't it be better to do it once in
2754 * the core than many times in the modules? */
2756 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2757 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency)
2759 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2760 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2762 /* We don't rewind here automatically. This is left to the
2763 * sink input implementor because some sink inputs need a
2764 * slow start, i.e. need some time to buffer client
2765 * samples before beginning streaming.
2767 * XXX: Does it really make sense to push this functionality to
2768 * the sink implementors? Wouldn't it be better to do it once in
2769 * the core than many times in the modules? */
2771 /* In flat volume mode we need to update the volume as
2773 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2776 case PA_SINK_MESSAGE_REMOVE_INPUT: {
2777 pa_sink_input *i = PA_SINK_INPUT(userdata);
2779 /* If you change anything here, make sure to change the
2780 * sink input handling a few lines down at
2781 * PA_SINK_MESSAGE_START_MOVE, too. */
2783 pa_sink_input_detach(i);
2785 pa_sink_input_set_state_within_thread(i, i->state);
2787 /* Since the caller sleeps in pa_sink_input_unlink(),
2788 * we can safely access data outside of thread_info even
2789 * though it is mutable */
2791 pa_assert(!i->sync_prev);
2792 pa_assert(!i->sync_next);
/* Unlink this input from the synchronized-stream chain. */
2794 if (i->thread_info.sync_prev) {
2795 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2796 i->thread_info.sync_prev = NULL;
2799 if (i->thread_info.sync_next) {
2800 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2801 i->thread_info.sync_next = NULL;
2804 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2805 pa_sink_invalidate_requested_latency(s, true);
2806 pa_sink_request_rewind(s, (size_t) -1);
2808 /* In flat volume mode we need to update the volume as
2810 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2813 case PA_SINK_MESSAGE_START_MOVE: {
2814 pa_sink_input *i = PA_SINK_INPUT(userdata);
2816 /* We don't support moving synchronized streams. */
2817 pa_assert(!i->sync_prev);
2818 pa_assert(!i->sync_next);
2819 pa_assert(!i->thread_info.sync_next);
2820 pa_assert(!i->thread_info.sync_prev);
2822 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2824 size_t sink_nbytes, total_nbytes;
2826 /* The old sink probably has some audio from this
2827 * stream in its buffer. We want to "take it back" as
2828 * much as possible and play it to the new sink. We
2829 * don't know at this point how much the old sink can
2830 * rewind. We have to pick something, and that
2831 * something is the full latency of the old sink here.
2832 * So we rewind the stream buffer by the sink latency
2833 * amount, which may be more than what we should
2834 * rewind. This can result in a chunk of audio being
2835 * played both to the old sink and the new sink.
2837 * FIXME: Fix this code so that we don't have to make
2838 * guesses about how much the sink will actually be
2839 * able to rewind. If someone comes up with a solution
2840 * for this, something to note is that the part of the
2841 * latency that the old sink couldn't rewind should
2842 * ideally be compensated after the stream has moved
2843 * to the new sink by adding silence. The new sink
2844 * most likely can't start playing the moved stream
2845 * immediately, and that gap should be removed from
2846 * the "compensation silence" (at least at the time of
2847 * writing this, the move finish code will actually
2848 * already take care of dropping the new sink's
2849 * unrewindable latency, so taking into account the
2850 * unrewindable latency of the old sink is the only
2853 * The render_memblockq contents are discarded,
2854 * because when the sink changes, the format of the
2855 * audio stored in the render_memblockq may change
2856 * too, making the stored audio invalid. FIXME:
2857 * However, the read and write indices are moved back
2858 * the same amount, so if they are not the same now,
2859 * they won't be the same after the rewind either. If
2860 * the write index of the render_memblockq is ahead of
2861 * the read index, then the render_memblockq will feed
2862 * the new sink some silence first, which it shouldn't
2863 * do. The write index should be flushed to be the
2864 * same as the read index. */
2866 /* Get the latency of the sink */
2867 usec = pa_sink_get_latency_within_thread(s, false);
2868 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2869 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
2871 if (total_nbytes > 0) {
2872 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
2873 i->thread_info.rewrite_flush = true;
2874 pa_sink_input_process_rewind(i, sink_nbytes);
2878 pa_sink_input_detach(i);
2880 /* Let's remove the sink input ...*/
2881 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2883 pa_sink_invalidate_requested_latency(s, true);
2885 pa_log_debug("Requesting rewind due to started move");
2886 pa_sink_request_rewind(s, (size_t) -1);
2888 /* In flat volume mode we need to update the volume as
2890 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2893 case PA_SINK_MESSAGE_FINISH_MOVE: {
2894 pa_sink_input *i = PA_SINK_INPUT(userdata);
2896 /* We don't support moving synchronized streams. */
2897 pa_assert(!i->sync_prev);
2898 pa_assert(!i->sync_next);
2899 pa_assert(!i->thread_info.sync_next);
2900 pa_assert(!i->thread_info.sync_prev);
2902 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2904 pa_sink_input_attach(i);
2906 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2910 /* In the ideal case the new sink would start playing
2911 * the stream immediately. That requires the sink to
2912 * be able to rewind all of its latency, which usually
2913 * isn't possible, so there will probably be some gap
2914 * before the moved stream becomes audible. We then
2915 * have two possibilities: 1) start playing the stream
2916 * from where it is now, or 2) drop the unrewindable
2917 * latency of the sink from the stream. With option 1
2918 * we won't lose any audio but the stream will have a
2919 * pause. With option 2 we may lose some audio but the
2920 * stream time will be somewhat in sync with the wall
2921 * clock. Lennart seems to have chosen option 2 (one
2922 * of the reasons might have been that option 1 is
2923 * actually much harder to implement), so we drop the
2924 * latency of the new sink from the moved stream and
2925 * hope that the sink will undo most of that in the
2928 /* Get the latency of the sink */
2929 usec = pa_sink_get_latency_within_thread(s, false);
2930 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2933 pa_sink_input_drop(i, nbytes);
2935 pa_log_debug("Requesting rewind due to finished move");
2936 pa_sink_request_rewind(s, nbytes);
2939 /* Updating the requested sink latency has to be done
2940 * after the sink rewind request, not before, because
2941 * otherwise the sink may limit the rewind amount
2944 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2945 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2947 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2948 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2950 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2953 case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
2954 pa_sink *root_sink = pa_sink_get_master(s);
2956 if (PA_LIKELY(root_sink))
2957 set_shared_volume_within_thread(root_sink);
2962 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
/* Deferred volume: queue the hardware write; then fall through to the
 * soft-volume handling below. */
2964 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2966 pa_sink_volume_change_push(s);
2968 /* Fall through ... */
2970 case PA_SINK_MESSAGE_SET_VOLUME:
2972 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2973 s->thread_info.soft_volume = s->soft_volume;
2974 pa_sink_request_rewind(s, (size_t) -1);
2977 /* Fall through ... */
2979 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2980 sync_input_volumes_within_thread(s);
2983 case PA_SINK_MESSAGE_GET_VOLUME:
2985 if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
2987 pa_sink_volume_change_flush(s);
2988 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2991 /* In case sink implementor reset SW volume. */
2992 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2993 s->thread_info.soft_volume = s->soft_volume;
2994 pa_sink_request_rewind(s, (size_t) -1);
2999 case PA_SINK_MESSAGE_SET_MUTE:
3001 if (s->thread_info.soft_muted != s->muted) {
3002 s->thread_info.soft_muted = s->muted;
3003 pa_sink_request_rewind(s, (size_t) -1);
3006 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
3011 case PA_SINK_MESSAGE_GET_MUTE:
3013 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
3014 return s->get_mute(s, userdata);
3018 case PA_SINK_MESSAGE_SET_STATE: {
3019 struct set_state_data *data = userdata;
3020 bool suspend_change =
3021 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(data->state)) ||
3022 (PA_SINK_IS_OPENED(s->thread_info.state) && data->state == PA_SINK_SUSPENDED);
3024 if (s->set_state_in_io_thread) {
3027 if ((r = s->set_state_in_io_thread(s, data->state, data->suspend_cause)) < 0)
3031 s->thread_info.state = data->state;
/* A suspended sink must not keep a pending rewind around. */
3033 if (s->thread_info.state == PA_SINK_SUSPENDED) {
3034 s->thread_info.rewind_nbytes = 0;
3035 s->thread_info.rewind_requested = false;
3038 if (suspend_change) {
3042 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
3043 if (i->suspend_within_thread)
3044 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
3050 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
3052 pa_usec_t *usec = userdata;
3053 *usec = pa_sink_get_requested_latency_within_thread(s);
3055 /* Yes, that's right, the IO thread will see -1 when no
3056 * explicit requested latency is configured, the main
3057 * thread will see max_latency */
3058 if (*usec == (pa_usec_t) -1)
3059 *usec = s->thread_info.max_latency;
3064 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
3065 pa_usec_t *r = userdata;
3067 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
3072 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
3073 pa_usec_t *r = userdata;
3075 r[0] = s->thread_info.min_latency;
3076 r[1] = s->thread_info.max_latency;
3081 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
3083 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
3086 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
3088 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
3091 case PA_SINK_MESSAGE_GET_MAX_REWIND:
3093 *((size_t*) userdata) = s->thread_info.max_rewind;
3096 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
3098 *((size_t*) userdata) = s->thread_info.max_request;
3101 case PA_SINK_MESSAGE_SET_MAX_REWIND:
3103 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
3106 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
3108 pa_sink_set_max_request_within_thread(s, (size_t) offset);
3111 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
3112 /* This message is sent from IO-thread and handled in main thread. */
3113 pa_assert_ctl_context();
3115 /* Make sure we're not messing with main thread when no longer linked */
3116 if (!PA_SINK_IS_LINKED(s->state))
3119 pa_sink_get_volume(s, true);
3120 pa_sink_get_mute(s, true);
3123 case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
3124 s->thread_info.port_latency_offset = offset;
3127 case PA_SINK_MESSAGE_GET_LATENCY:
3128 case PA_SINK_MESSAGE_MAX:
3135 /* Called from main thread */
/* Suspend or resume every sink in the core for the given cause.
 * Iterates all sinks, collecting pa_sink_suspend() failures. */
3136 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
3141 pa_core_assert_ref(c);
3142 pa_assert_ctl_context();
3143 pa_assert(cause != 0);
3145 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
3148 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
3155 /* Called from IO thread */
/* Detach every sink input of this sink, then the monitor source.
 * Counterpart of pa_sink_attach_within_thread(). */
3156 void pa_sink_detach_within_thread(pa_sink *s) {
3160 pa_sink_assert_ref(s);
3161 pa_sink_assert_io_context(s);
3162 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3164 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3165 pa_sink_input_detach(i);
3167 if (s->monitor_source)
3168 pa_source_detach_within_thread(s->monitor_source);
3171 /* Called from IO thread */
/* Re-attach every sink input of this sink, then the monitor source.
 * Counterpart of pa_sink_detach_within_thread(). */
3172 void pa_sink_attach_within_thread(pa_sink *s) {
3176 pa_sink_assert_ref(s);
3177 pa_sink_assert_io_context(s);
3178 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3180 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3181 pa_sink_input_attach(i);
3183 if (s->monitor_source)
3184 pa_source_attach_within_thread(s->monitor_source);
3187 /* Called from IO thread */
/* Request that the sink rewinds its playback buffer by up to 'nbytes'
 * ((size_t) -1 means "as much as possible"). The amount is clamped to
 * max_rewind; a smaller request than one already pending is ignored.
 * The sink implementor is notified via the request_rewind() callback. */
3188 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
3189 pa_sink_assert_ref(s);
3190 pa_sink_assert_io_context(s);
3191 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3193 if (nbytes == (size_t) -1)
3194 nbytes = s->thread_info.max_rewind;
3196 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
/* An equal or larger rewind is already pending -- nothing to do. */
3198 if (s->thread_info.rewind_requested &&
3199 nbytes <= s->thread_info.rewind_nbytes)
3202 s->thread_info.rewind_nbytes = nbytes;
3203 s->thread_info.rewind_requested = true;
3205 if (s->request_rewind)
3206 s->request_rewind(s);
3209 /* Called from IO thread */
/* Compute the effective requested latency: the minimum of all sink inputs'
 * requested latencies and the monitor source's, clamped to the sink's
 * [min_latency, max_latency] range. Returns (pa_usec_t) -1 when nothing
 * requested a latency. Sinks without DYNAMIC_LATENCY report their (clamped)
 * fixed latency. The result is cached once the sink is linked. */
3210 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
3211 pa_usec_t result = (pa_usec_t) -1;
3214 pa_usec_t monitor_latency;
3216 pa_sink_assert_ref(s);
3217 pa_sink_assert_io_context(s);
3219 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3220 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
3222 if (s->thread_info.requested_latency_valid)
3223 return s->thread_info.requested_latency;
/* Take the smallest latency requested by any connected input ... */
3225 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3226 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3227 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3228 result = i->thread_info.requested_sink_latency;
/* ... and fold in the monitor source's requested latency too. */
3230 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3232 if (monitor_latency != (pa_usec_t) -1 &&
3233 (result == (pa_usec_t) -1 || result > monitor_latency))
3234 result = monitor_latency;
3236 if (result != (pa_usec_t) -1)
3237 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3239 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3240 /* Only cache if properly initialized */
3241 s->thread_info.requested_latency = result;
3242 s->thread_info.requested_latency_valid = true;
3248 /* Called from main thread */
/* Main-thread accessor for the requested latency; queries the IO thread
 * synchronously (PA_SINK_MESSAGE_GET_REQUESTED_LATENCY). A suspended sink
 * takes a separate path -- the lines of that branch are elided here. */
3249 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3252 pa_sink_assert_ref(s);
3253 pa_assert_ctl_context();
3254 pa_assert(PA_SINK_IS_LINKED(s->state));
3256 if (s->state == PA_SINK_SUSPENDED)
3259 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3264 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Update the sink's maximum rewindable amount and propagate it to all
 * attached inputs (only when linked) and to the monitor source.
 * No-op when the value is unchanged. */
3265 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3269 pa_sink_assert_ref(s);
3270 pa_sink_assert_io_context(s);
3272 if (max_rewind == s->thread_info.max_rewind)
3275 s->thread_info.max_rewind = max_rewind;
3277 if (PA_SINK_IS_LINKED(s->thread_info.state))
3278 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3279 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3281 if (s->monitor_source)
3282 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3285 /* Called from main thread */
/* Main-thread wrapper: route the new max_rewind through the IO thread when
 * the sink is linked, otherwise apply it directly (IO thread not running). */
3286 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3287 pa_sink_assert_ref(s);
3288 pa_assert_ctl_context();
3290 if (PA_SINK_IS_LINKED(s->state))
3291 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3293 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3296 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Update the sink's maximum request size and propagate it to all attached
 * inputs when linked. No-op when the value is unchanged. */
3297 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3300 pa_sink_assert_ref(s);
3301 pa_sink_assert_io_context(s);
3303 if (max_request == s->thread_info.max_request)
3306 s->thread_info.max_request = max_request;
3308 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3311 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3312 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3316 /* Called from main thread */
/* Main-thread wrapper: route the new max_request through the IO thread when
 * the sink is linked, otherwise apply it directly (IO thread not running). */
3317 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3318 pa_sink_assert_ref(s);
3319 pa_assert_ctl_context();
3321 if (PA_SINK_IS_LINKED(s->state))
3322 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3324 pa_sink_set_max_request_within_thread(s, max_request);
3327 /* Called from IO thread */
/* Invalidate the cached requested latency (only meaningful for
 * DYNAMIC_LATENCY sinks) and, when linked, notify the sink implementor and
 * every input that cares via their update_*_requested_latency() callbacks.
 * NOTE(review): the role of the 'dynamic' parameter is not visible in this
 * excerpt (its use appears to be in elided lines) -- verify against the
 * full source before relying on it. */
3328 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
3332 pa_sink_assert_ref(s);
3333 pa_sink_assert_io_context(s);
3335 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3336 s->thread_info.requested_latency_valid = false;
3340 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3342 if (s->update_requested_latency)
3343 s->update_requested_latency(s);
3345 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3346 if (i->update_sink_requested_latency)
3347 i->update_sink_requested_latency(i);
3351 /* Called from main thread */
3352 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3353 pa_sink_assert_ref(s);
3354 pa_assert_ctl_context();
3356 /* min_latency == 0: no limit
3357 * min_latency anything else: specified limit
3359 * Similar for max_latency */
3361 if (min_latency < ABSOLUTE_MIN_LATENCY)
3362 min_latency = ABSOLUTE_MIN_LATENCY;
3364 if (max_latency <= 0 ||
3365 max_latency > ABSOLUTE_MAX_LATENCY)
3366 max_latency = ABSOLUTE_MAX_LATENCY;
3368 pa_assert(min_latency <= max_latency);
3370 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3371 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3372 max_latency == ABSOLUTE_MAX_LATENCY) ||
3373 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3375 if (PA_SINK_IS_LINKED(s->state)) {
3381 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3383 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3386 /* Called from main thread */
3387 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3388 pa_sink_assert_ref(s);
3389 pa_assert_ctl_context();
3390 pa_assert(min_latency);
3391 pa_assert(max_latency);
3393 if (PA_SINK_IS_LINKED(s->state)) {
3394 pa_usec_t r[2] = { 0, 0 };
3396 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3398 *min_latency = r[0];
3399 *max_latency = r[1];
3401 *min_latency = s->thread_info.min_latency;
3402 *max_latency = s->thread_info.max_latency;
3406 /* Called from IO thread */
3407 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3408 pa_sink_assert_ref(s);
3409 pa_sink_assert_io_context(s);
3411 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3412 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3413 pa_assert(min_latency <= max_latency);
3415 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3416 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3417 max_latency == ABSOLUTE_MAX_LATENCY) ||
3418 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3420 if (s->thread_info.min_latency == min_latency &&
3421 s->thread_info.max_latency == max_latency)
3424 s->thread_info.min_latency = min_latency;
3425 s->thread_info.max_latency = max_latency;
3427 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3431 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3432 if (i->update_sink_latency_range)
3433 i->update_sink_latency_range(i);
3436 pa_sink_invalidate_requested_latency(s, false);
3438 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3441 /* Called from main thread */
3442 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3443 pa_sink_assert_ref(s);
3444 pa_assert_ctl_context();
3446 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3447 pa_assert(latency == 0);
3451 if (latency < ABSOLUTE_MIN_LATENCY)
3452 latency = ABSOLUTE_MIN_LATENCY;
3454 if (latency > ABSOLUTE_MAX_LATENCY)
3455 latency = ABSOLUTE_MAX_LATENCY;
3457 if (PA_SINK_IS_LINKED(s->state))
3458 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3460 s->thread_info.fixed_latency = latency;
3462 pa_source_set_fixed_latency(s->monitor_source, latency);
3465 /* Called from main thread */
3466 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3469 pa_sink_assert_ref(s);
3470 pa_assert_ctl_context();
3472 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3475 if (PA_SINK_IS_LINKED(s->state))
3476 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3478 latency = s->thread_info.fixed_latency;
3483 /* Called from IO thread */
3484 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3485 pa_sink_assert_ref(s);
3486 pa_sink_assert_io_context(s);
3488 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3489 pa_assert(latency == 0);
3490 s->thread_info.fixed_latency = 0;
3492 if (s->monitor_source)
3493 pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
3498 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3499 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3501 if (s->thread_info.fixed_latency == latency)
3504 s->thread_info.fixed_latency = latency;
3506 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3510 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3511 if (i->update_sink_fixed_latency)
3512 i->update_sink_fixed_latency(i);
3515 pa_sink_invalidate_requested_latency(s, false);
3517 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3520 /* Called from main context */
3521 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3522 pa_sink_assert_ref(s);
3524 s->port_latency_offset = offset;
3526 if (PA_SINK_IS_LINKED(s->state))
3527 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3529 s->thread_info.port_latency_offset = offset;
3531 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3534 /* Called from main context */
3535 size_t pa_sink_get_max_rewind(pa_sink *s) {
3537 pa_assert_ctl_context();
3538 pa_sink_assert_ref(s);
3540 if (!PA_SINK_IS_LINKED(s->state))
3541 return s->thread_info.max_rewind;
3543 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3548 /* Called from main context */
3549 size_t pa_sink_get_max_request(pa_sink *s) {
3551 pa_sink_assert_ref(s);
3552 pa_assert_ctl_context();
3554 if (!PA_SINK_IS_LINKED(s->state))
3555 return s->thread_info.max_request;
3557 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3562 /* Called from main context */
3563 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3564 pa_device_port *port;
3566 pa_sink_assert_ref(s);
3567 pa_assert_ctl_context();
3570 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3571 return -PA_ERR_NOTIMPLEMENTED;
3575 return -PA_ERR_NOENTITY;
3577 if (!(port = pa_hashmap_get(s->ports, name)))
3578 return -PA_ERR_NOENTITY;
3580 if (s->active_port == port) {
3581 s->save_port = s->save_port || save;
3585 s->port_changing = true;
3587 if (s->set_port(s, port) < 0)
3588 return -PA_ERR_NOENTITY;
3590 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3592 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3594 s->active_port = port;
3595 s->save_port = save;
3597 pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3599 /* The active port affects the default sink selection. */
3600 pa_core_update_default_sink(s->core);
3602 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
3604 s->port_changing = false;
3609 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3610 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3614 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3617 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3619 if (pa_streq(ff, "microphone"))
3620 t = "audio-input-microphone";
3621 else if (pa_streq(ff, "webcam"))
3623 else if (pa_streq(ff, "computer"))
3625 else if (pa_streq(ff, "handset"))
3627 else if (pa_streq(ff, "portable"))
3628 t = "multimedia-player";
3629 else if (pa_streq(ff, "tv"))
3630 t = "video-display";
3633 * The following icons are not part of the icon naming spec,
3634 * because Rodney Dawes sucks as the maintainer of that spec.
3636 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3638 else if (pa_streq(ff, "headset"))
3639 t = "audio-headset";
3640 else if (pa_streq(ff, "headphone"))
3641 t = "audio-headphones";
3642 else if (pa_streq(ff, "speaker"))
3643 t = "audio-speakers";
3644 else if (pa_streq(ff, "hands-free"))
3645 t = "audio-handsfree";
3649 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3650 if (pa_streq(c, "modem"))
3657 t = "audio-input-microphone";
3660 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3661 if (strstr(profile, "analog"))
3663 else if (strstr(profile, "iec958"))
3665 else if (strstr(profile, "hdmi"))
3669 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3671 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3676 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3677 const char *s, *d = NULL, *k;
3680 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3684 if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3688 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3689 if (pa_streq(s, "internal"))
3690 d = _("Built-in Audio");
3693 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3694 if (pa_streq(s, "modem"))
3698 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3703 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3706 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3708 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3713 bool pa_device_init_intended_roles(pa_proplist *p) {
3717 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3720 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3721 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3722 || pa_streq(s, "headset")) {
3723 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3730 unsigned pa_device_init_priority(pa_proplist *p) {
3732 unsigned priority = 0;
3736 /* JACK sinks and sources get very high priority so that we'll switch the
3737 * default devices automatically when jackd starts and
3738 * module-jackdbus-detect creates the jack sink and source. */
3739 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_API))) {
3740 if (pa_streq(s, "jack"))
3744 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3746 if (pa_streq(s, "sound"))
3748 else if (!pa_streq(s, "modem"))
3752 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3754 if (pa_streq(s, "headphone"))
3756 else if (pa_streq(s, "hifi"))
3758 else if (pa_streq(s, "speaker"))
3760 else if (pa_streq(s, "portable"))
3764 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3766 if (pa_streq(s, "bluetooth"))
3768 else if (pa_streq(s, "usb"))
3770 else if (pa_streq(s, "pci"))
3774 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3776 if (pa_startswith(s, "analog-")) {
3779 /* If an analog device has an intended role of "phone", it probably
3780 * co-exists with another device that is meant for everything else,
3781 * and that other device should have higher priority than the phone
3783 if (pa_str_in_list_spaces(pa_proplist_gets(p, PA_PROP_DEVICE_INTENDED_ROLES), "phone"))
3786 else if (pa_startswith(s, "iec958-"))
/* Lock-free free-list used to recycle pa_sink_volume_change entries;
 * entries that don't fit are released with pa_xfree(). */
PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3795 /* Called from the IO thread. */
3796 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3797 pa_sink_volume_change *c;
3798 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3799 c = pa_xnew(pa_sink_volume_change, 1);
3801 PA_LLIST_INIT(pa_sink_volume_change, c);
3803 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3807 /* Called from the IO thread. */
3808 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3810 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3814 /* Called from the IO thread. */
3815 void pa_sink_volume_change_push(pa_sink *s) {
3816 pa_sink_volume_change *c = NULL;
3817 pa_sink_volume_change *nc = NULL;
3818 pa_sink_volume_change *pc = NULL;
3819 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3821 const char *direction = NULL;
3824 nc = pa_sink_volume_change_new(s);
3826 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3827 * Adding one more volume for HW would get us rid of this, but I am trying
3828 * to survive with the ones we already have. */
3829 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
3831 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3832 pa_log_debug("Volume not changing");
3833 pa_sink_volume_change_free(nc);
3837 nc->at = pa_sink_get_latency_within_thread(s, false);
3838 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3840 if (s->thread_info.volume_changes_tail) {
3841 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3842 /* If volume is going up let's do it a bit late. If it is going
3843 * down let's do it a bit early. */
3844 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3845 if (nc->at + safety_margin > c->at) {
3846 nc->at += safety_margin;
3851 else if (nc->at - safety_margin > c->at) {
3852 nc->at -= safety_margin;
3860 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3861 nc->at += safety_margin;
3864 nc->at -= safety_margin;
3867 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3870 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3873 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3875 /* We can ignore volume events that came earlier but should happen later than this. */
3876 PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
3877 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3878 pa_sink_volume_change_free(c);
3881 s->thread_info.volume_changes_tail = nc;
3884 /* Called from the IO thread. */
3885 static void pa_sink_volume_change_flush(pa_sink *s) {
3886 pa_sink_volume_change *c = s->thread_info.volume_changes;
3888 s->thread_info.volume_changes = NULL;
3889 s->thread_info.volume_changes_tail = NULL;
3891 pa_sink_volume_change *next = c->next;
3892 pa_sink_volume_change_free(c);
3897 /* Called from the IO thread. */
3898 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3904 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3910 pa_assert(s->write_volume);
3912 now = pa_rtclock_now();
3914 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3915 pa_sink_volume_change *c = s->thread_info.volume_changes;
3916 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3917 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3918 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3920 s->thread_info.current_hw_volume = c->hw_volume;
3921 pa_sink_volume_change_free(c);
3927 if (s->thread_info.volume_changes) {
3929 *usec_to_next = s->thread_info.volume_changes->at - now;
3930 if (pa_log_ratelimit(PA_LOG_DEBUG))
3931 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3936 s->thread_info.volume_changes_tail = NULL;
3941 /* Called from the IO thread. */
3942 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3943 /* All the queued volume events later than current latency are shifted to happen earlier. */
3944 pa_sink_volume_change *c;
3945 pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3946 pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3947 pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
3949 pa_log_debug("latency = %lld", (long long) limit);
3950 limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3952 PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3953 pa_usec_t modified_limit = limit;
3954 if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3955 modified_limit -= s->thread_info.volume_change_safety_margin;
3957 modified_limit += s->thread_info.volume_change_safety_margin;
3958 if (c->at > modified_limit) {
3960 if (c->at < modified_limit)
3961 c->at = modified_limit;
3963 prev_vol = pa_cvolume_avg(&c->hw_volume);
3965 pa_sink_volume_change_apply(s, NULL);
3968 /* Called from the main thread */
3969 /* Gets the list of formats supported by the sink. The members and idxset must
3970 * be freed by the caller. */
3971 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3976 if (s->get_formats) {
3977 /* Sink supports format query, all is good */
3978 ret = s->get_formats(s);
3980 /* Sink doesn't support format query, so assume it does PCM */
3981 pa_format_info *f = pa_format_info_new();
3982 f->encoding = PA_ENCODING_PCM;
3984 ret = pa_idxset_new(NULL, NULL);
3985 pa_idxset_put(ret, f, NULL);
3991 /* Called from the main thread */
3992 /* Allows an external source to set what formats a sink supports if the sink
3993 * permits this. The function makes a copy of the formats on success. */
3994 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3999 /* Sink supports setting formats -- let's give it a shot */
4000 return s->set_formats(s, formats);
4002 /* Sink doesn't support setting this -- bail out */
4006 /* Called from the main thread */
4007 /* Checks if the sink can accept this format */
4008 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
4009 pa_idxset *formats = NULL;
4015 formats = pa_sink_get_formats(s);
4018 pa_format_info *finfo_device;
4021 PA_IDXSET_FOREACH(finfo_device, formats, i) {
4022 if (pa_format_info_is_compatible(finfo_device, f)) {
4028 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
4034 /* Called from the main thread */
4035 /* Calculates the intersection between formats supported by the sink and
4036 * in_formats, and returns these, in the order of the sink's formats. */
4037 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
4038 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
4039 pa_format_info *f_sink, *f_in;
4044 if (!in_formats || pa_idxset_isempty(in_formats))
4047 sink_formats = pa_sink_get_formats(s);
4049 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
4050 PA_IDXSET_FOREACH(f_in, in_formats, j) {
4051 if (pa_format_info_is_compatible(f_sink, f_in))
4052 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
4058 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
4063 /* Called from the main thread */
4064 void pa_sink_set_sample_format(pa_sink *s, pa_sample_format_t format) {
4065 pa_sample_format_t old_format;
4068 pa_assert(pa_sample_format_valid(format));
4070 old_format = s->sample_spec.format;
4071 if (old_format == format)
4074 pa_log_info("%s: format: %s -> %s",
4075 s->name, pa_sample_format_to_string(old_format), pa_sample_format_to_string(format));
4077 s->sample_spec.format = format;
4079 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4082 /* Called from the main thread */
4083 void pa_sink_set_sample_rate(pa_sink *s, uint32_t rate) {
4087 pa_assert(pa_sample_rate_valid(rate));
4089 old_rate = s->sample_spec.rate;
4090 if (old_rate == rate)
4093 pa_log_info("%s: rate: %u -> %u", s->name, old_rate, rate);
4095 s->sample_spec.rate = rate;
4097 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4100 /* Called from the main thread. */
4101 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
4102 pa_cvolume old_volume;
4103 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4104 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4109 old_volume = s->reference_volume;
4111 if (pa_cvolume_equal(volume, &old_volume))
4114 s->reference_volume = *volume;
4115 pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
4116 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
4117 s->flags & PA_SINK_DECIBEL_VOLUME),
4118 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
4119 s->flags & PA_SINK_DECIBEL_VOLUME));
4121 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4122 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);
4125 void pa_sink_move_streams_to_default_sink(pa_core *core, pa_sink *old_sink, bool default_sink_changed) {
4130 pa_assert(old_sink);
4132 if (core->state == PA_CORE_SHUTDOWN)
4135 if (core->default_sink == NULL || core->default_sink->unlink_requested)
4138 if (old_sink == core->default_sink)
4141 PA_IDXSET_FOREACH(i, old_sink->inputs, idx) {
4142 if (!PA_SINK_INPUT_IS_LINKED(i->state))
4148 /* Don't move sink-inputs which connect filter sinks to their target sinks */
4152 /* If default_sink_changed is false, the old sink became unavailable, so all streams must be moved. */
4153 if (pa_safe_streq(old_sink->name, i->preferred_sink) && default_sink_changed)
4156 if (!pa_sink_input_may_move_to(i, core->default_sink))
4159 if (default_sink_changed)
4160 pa_log_info("The sink input %u \"%s\" is moving to %s due to change of the default sink.",
4161 i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
4163 pa_log_info("The sink input %u \"%s\" is moving to %s, because the old sink became unavailable.",
4164 i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
4166 pa_sink_input_move_to(i, core->default_sink, false);