2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
32 #include <pulse/introspect.h>
33 #include <pulse/format.h>
34 #include <pulse/utf8.h>
35 #include <pulse/xmalloc.h>
36 #include <pulse/timeval.h>
37 #include <pulse/util.h>
38 #include <pulse/rtclock.h>
39 #include <pulse/internal.h>
41 #include <pulsecore/i18n.h>
42 #include <pulsecore/sink-input.h>
43 #include <pulsecore/namereg.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/mix.h>
47 #include <pulsecore/core-subscribe.h>
48 #include <pulsecore/log.h>
49 #include <pulsecore/macro.h>
50 #include <pulsecore/play-memblockq.h>
51 #include <pulsecore/flist.h>
55 #define MAX_MIX_CHANNELS 32
56 #define MIX_BUFFER_LENGTH (pa_page_size())
57 #define ABSOLUTE_MIN_LATENCY (500)
58 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
59 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
61 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
63 struct pa_sink_volume_change {
67 PA_LLIST_FIELDS(pa_sink_volume_change);
70 struct sink_message_set_port {
75 static void sink_free(pa_object *s);
77 static void pa_sink_volume_change_push(pa_sink *s);
78 static void pa_sink_volume_change_flush(pa_sink *s);
79 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
82 static void pa_sink_write_pcm_dump(pa_sink *s, pa_memchunk *chunk)
/* Appends the audio payload of 'chunk' to a per-sink raw PCM dump file,
 * opening or closing the file as dictated by s->core->pcm_dump.
 * NOTE(review): this excerpt is fragmentary -- declarations of 'now', 'tm',
 * 'datetime' and 'ptr', and several closing braces, are missing. Presumably
 * part of the TIZEN_PCM_DUMP debug facility used elsewhere in this file --
 * confirm against the full source. */
84 char *dump_time = NULL, *dump_path_surfix = NULL;
85 const char *s_device_api_str, *card_name_str, *device_idx_str;
90 /* open file for dump pcm */
91 if (s->core->pcm_dump & PA_PCM_DUMP_PA_SINK && !s->pcm_dump_fp && s->state == PA_SINK_RUNNING) {
/* Millisecond-resolution local timestamp used in the dump file name. */
92 pa_gettimeofday(&now);
93 localtime_r(&now.tv_sec, &tm);
94 memset(&datetime[0], 0x00, sizeof(datetime));
95 strftime(&datetime[0], sizeof(datetime), "%H%M%S", &tm);
96 dump_time = pa_sprintf_malloc("%s.%03ld", &datetime[0], now.tv_usec / 1000);
/* Device-identifying path suffix: "<card>.<device>" for ALSA sinks,
 * otherwise the device API string, otherwise the sink name.
 * ("surfix" is an existing typo for "suffix", kept as-is.) */
98 if ((s_device_api_str = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_API))) {
99 if (pa_streq(s_device_api_str, "alsa")) {
100 card_name_str = pa_proplist_gets(s->proplist, "alsa.card_name");
101 device_idx_str = pa_proplist_gets(s->proplist, "alsa.device");
102 dump_path_surfix = pa_sprintf_malloc("%s.%s", pa_strnull(card_name_str), pa_strnull(device_idx_str));
104 dump_path_surfix = pa_sprintf_malloc("%s", s_device_api_str);
107 dump_path_surfix = pa_sprintf_malloc("%s", s->name);
/* Final path: <prefix>_<time>_pa-sink<index>-<suffix>_<ch>ch_<rate>.raw */
110 s->dump_path = pa_sprintf_malloc("%s_%s_pa-sink%d-%s_%dch_%d.raw", PA_PCM_DUMP_PATH_PREFIX, pa_strempty(dump_time),
111 s->index, pa_strempty(dump_path_surfix), s->sample_spec.channels, s->sample_spec.rate);
113 s->pcm_dump_fp = fopen(s->dump_path, "w");
115 pa_log_warn("%s open failed", s->dump_path);
117 pa_log_info("%s opened", s->dump_path);
120 pa_xfree(dump_path_surfix);
121 /* close file for dump pcm when config is changed */
122 } else if (~s->core->pcm_dump & PA_PCM_DUMP_PA_SINK && s->pcm_dump_fp) {
123 fclose(s->pcm_dump_fp);
124 pa_log_info("%s closed", s->dump_path);
125 pa_xfree(s->dump_path);
126 s->pcm_dump_fp = NULL;
/* With a dump file open, write the raw chunk bytes. */
130 if (s->pcm_dump_fp) {
133 ptr = pa_memblock_acquire(chunk->memblock);
/* Writes chunk->length bytes starting at offset chunk->index within the
 * acquired memory block. */
135 fwrite((uint8_t *)ptr + chunk->index, 1, chunk->length, s->pcm_dump_fp);
137 pa_log_warn("pa_memblock_acquire is failed. ptr is NULL");
139 pa_memblock_release(chunk->memblock);
144 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
/* Prepares a caller-provided pa_sink_new_data: allocates the property list
 * and the name->pa_device_port hashmap, which drops port references with
 * pa_device_port_unref on removal. */
148 data->proplist = pa_proplist_new();
149 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
/* Replaces any previously set sink name with a private copy of 'name'. */
154 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
157 pa_xfree(data->name);
158 data->name = pa_xstrdup(name);
/* Copies *spec and records whether one was supplied; passing NULL clears
 * the "is set" flag without touching the stored value. */
161 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
164 if ((data->sample_spec_is_set = !!spec))
165 data->sample_spec = *spec;
/* Same NULL-clears-flag pattern as above, for the channel map. */
168 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
171 if ((data->channel_map_is_set = !!map))
172 data->channel_map = *map;
/* Stores the alternate sample rate and unconditionally marks it as set. */
175 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
178 data->alternate_sample_rate_is_set = true;
179 data->alternate_sample_rate = alternate_sample_rate;
/* Same NULL-clears-flag pattern, for the initial volume. */
182 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
185 if ((data->volume_is_set = !!volume))
186 data->volume = *volume;
/* Marks the mute state as explicitly set (the assignment of the 'mute'
 * value itself is not visible in this excerpt). */
189 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
192 data->muted_is_set = true;
/* Replaces the requested active-port name with a private copy. */
196 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
199 pa_xfree(data->active_port);
200 data->active_port = pa_xstrdup(port);
/* Releases everything the pa_sink_new_data owns: proplist, port map, and
 * the name / active-port strings. */
203 void pa_sink_new_data_done(pa_sink_new_data *data) {
206 pa_proplist_free(data->proplist);
209 pa_hashmap_free(data->ports);
211 pa_xfree(data->name);
212 pa_xfree(data->active_port);
215 /* Called from main context */
216 static void reset_callbacks(pa_sink *s) {
/* Clears the implementor-provided hook pointers so a freshly created sink
 * starts with no driver callbacks installed. */
220 s->get_volume = NULL;
221 s->set_volume = NULL;
222 s->write_volume = NULL;
225 s->request_rewind = NULL;
226 s->update_requested_latency = NULL;
228 s->get_formats = NULL;
229 s->set_formats = NULL;
230 s->reconfigure = NULL;
233 /* Called from main context */
234 pa_sink* pa_sink_new(
236 pa_sink_new_data *data,
237 pa_sink_flags_t flags) {
/* Creates and registers a new sink from 'data': validates the requested
 * configuration, fires the NEW and FIXATE hooks, initializes main-thread
 * and IO-thread state, and creates the companion monitor source.
 * Returns the new sink, or NULL on validation/hook failure.
 * NOTE(review): excerpt is fragmentary -- declarations of 's', 'name',
 * 'pt', 'dn' and several error-path lines are missing. */
241 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
242 pa_source_new_data source_data;
248 pa_assert(data->name);
249 pa_assert_ctl_context();
251 s = pa_msgobject_new(pa_sink);
/* Claim the sink name in the global name registry before anything else. */
253 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
254 pa_log_debug("Failed to register name %s.", data->name);
259 pa_sink_new_data_set_name(data, name);
261 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
263 pa_namereg_unregister(core, name);
267 /* FIXME, need to free s here on failure */
/* Validate the (possibly hook-modified) new-data contents. */
269 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
270 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
272 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
274 if (!data->channel_map_is_set)
275 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
277 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
278 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
280 /* FIXME: There should probably be a general function for checking whether
281 * the sink volume is allowed to be set, like there is for sink inputs. */
282 pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
284 if (!data->volume_is_set) {
285 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
286 data->save_volume = false;
289 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
290 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
292 if (!data->muted_is_set)
/* Inherit card properties and fill in standard device metadata. */
296 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
298 pa_device_init_description(data->proplist, data->card);
299 pa_device_init_icon(data->proplist, true);
300 pa_device_init_intended_roles(data->proplist);
/* If no port was requested, pick the best-priority one. */
302 if (!data->active_port) {
303 pa_device_port *p = pa_device_port_find_best(data->ports);
305 pa_sink_new_data_set_port(data, p->name);
308 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
310 pa_namereg_unregister(core, name);
/* From here on the new-data is fixed; populate the sink object itself. */
314 s->parent.parent.free = sink_free;
315 s->parent.process_msg = pa_sink_process_msg;
318 s->state = PA_SINK_INIT;
321 s->suspend_cause = data->suspend_cause;
322 pa_sink_set_mixer_dirty(s, false);
323 s->name = pa_xstrdup(name);
324 s->proplist = pa_proplist_copy(data->proplist);
325 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
326 s->module = data->module;
327 s->card = data->card;
329 s->priority = pa_device_init_priority(s->proplist);
331 s->sample_spec = data->sample_spec;
332 s->channel_map = data->channel_map;
333 s->default_sample_rate = s->sample_spec.rate;
335 if (data->alternate_sample_rate_is_set)
336 s->alternate_sample_rate = data->alternate_sample_rate;
338 s->alternate_sample_rate = s->core->alternate_sample_rate;
340 s->avoid_resampling = data->avoid_resampling;
341 s->origin_avoid_resampling = data->avoid_resampling;
342 s->selected_sample_format = s->sample_spec.format;
343 s->selected_sample_rate = s->sample_spec.rate;
346 s->inputs = pa_idxset_new(NULL, NULL);
348 s->input_to_master = NULL;
/* Volume state: reference/real start at the requested volume, soft volume
 * starts at NORM. */
350 s->reference_volume = s->real_volume = data->volume;
351 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
352 s->base_volume = PA_VOLUME_NORM;
353 s->n_volume_steps = PA_VOLUME_NORM+1;
354 s->muted = data->muted;
355 s->refresh_volume = s->refresh_muted = false;
362 /* As a minor optimization we just steal the list instead of
364 s->ports = data->ports;
367 s->active_port = NULL;
368 s->save_port = false;
370 if (data->active_port)
371 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
372 s->save_port = data->save_port;
374 /* Hopefully the active port has already been assigned in the previous call
375 to pa_device_port_find_best, but better safe than sorry */
377 s->active_port = pa_device_port_find_best(s->ports);
380 s->port_latency_offset = s->active_port->latency_offset;
382 s->port_latency_offset = 0;
384 s->save_volume = data->save_volume;
385 s->save_muted = data->save_muted;
386 #ifdef TIZEN_PCM_DUMP
387 s->pcm_dump_fp = NULL;
391 pa_silence_memchunk_get(
392 &core->silence_cache,
/* IO-thread-side state; mirrors the main-thread values set above. */
398 s->thread_info.rtpoll = NULL;
399 s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
400 (pa_free_cb_t) pa_sink_input_unref);
401 s->thread_info.soft_volume = s->soft_volume;
402 s->thread_info.soft_muted = s->muted;
403 s->thread_info.state = s->state;
404 s->thread_info.rewind_nbytes = 0;
405 s->thread_info.rewind_requested = false;
406 s->thread_info.max_rewind = 0;
407 s->thread_info.max_request = 0;
408 s->thread_info.requested_latency_valid = false;
409 s->thread_info.requested_latency = 0;
410 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
411 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
412 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
414 PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
415 s->thread_info.volume_changes_tail = NULL;
416 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
417 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
418 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
419 s->thread_info.port_latency_offset = s->port_latency_offset;
421 /* FIXME: This should probably be moved to pa_sink_put() */
422 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
425 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
427 pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
428 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
431 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
432 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Create the "<name>.monitor" source mirroring this sink's stream. */
436 pa_source_new_data_init(&source_data);
437 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
438 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
439 pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
440 source_data.name = pa_sprintf_malloc("%s.monitor", name);
441 source_data.driver = data->driver;
442 source_data.module = data->module;
443 source_data.card = data->card;
445 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
446 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
447 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
/* The monitor inherits only the latency-related flags from the sink. */
449 s->monitor_source = pa_source_new(core, &source_data,
450 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
451 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
453 pa_source_new_data_done(&source_data);
455 if (!s->monitor_source) {
461 s->monitor_source->monitor_of = s;
463 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
464 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
465 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
470 /* Called from main context */
471 static int sink_set_state(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) {
/* Transitions the sink to 'state' with 'suspend_cause': notifies the
 * driver (set_state), the IO thread (SET_STATE message), hooks,
 * subscribers, sink inputs and the monitor source. Returns 0 on success,
 * negative on failure. NOTE(review): several declarations ('ret',
 * 'state_changed', 'suspending', 'resuming', 'i', 'idx') and some
 * early-return/brace lines are missing from this excerpt. */
474 bool suspend_cause_changed;
479 pa_assert_ctl_context();
481 state_changed = state != s->state;
482 suspend_cause_changed = suspend_cause != s->suspend_cause;
484 if (!state_changed && !suspend_cause_changed)
/* The two transitions everyone downstream cares about. */
487 suspending = PA_SINK_IS_OPENED(s->state) && state == PA_SINK_SUSPENDED;
488 resuming = s->state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state);
490 /* If we are resuming, suspend_cause must be 0. */
491 pa_assert(!resuming || !suspend_cause);
493 /* Here's something to think about: what to do with the suspend cause if
494 * resuming the sink fails? The old suspend cause will be incorrect, so we
495 * can't use that. On the other hand, if we set no suspend cause (as is the
496 * case currently), then it looks strange to have a sink suspended without
497 * any cause. It might be a good idea to add a new "resume failed" suspend
498 * cause, or it might just add unnecessary complexity, given that the
499 * current approach of not setting any suspend cause works well enough. */
501 if (s->set_state && state_changed) {
502 ret = s->set_state(s, state);
503 /* set_state() is allowed to fail only when resuming. */
504 pa_assert(ret >= 0 || resuming);
/* Mirror the state change into the IO thread via the async msg queue. */
507 if (ret >= 0 && s->asyncmsgq && state_changed)
508 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
509 /* SET_STATE is allowed to fail only when resuming. */
/* Roll the driver back to SUSPENDED when the IO thread refuses a resume. */
513 s->set_state(s, PA_SINK_SUSPENDED);
516 #ifdef TIZEN_PCM_DUMP
517 /* close file for dump pcm */
518 if (s->pcm_dump_fp && (s->core->pcm_dump_option & PA_PCM_DUMP_OPTION_SEPARATED) && suspending) {
519 fclose(s->pcm_dump_fp);
520 pa_log_info("%s closed", s->dump_path);
521 pa_xfree(s->dump_path);
522 s->pcm_dump_fp = NULL;
525 if (suspend_cause_changed) {
526 char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
527 char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
529 pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(s->suspend_cause, old_cause_buf),
530 pa_suspend_cause_to_string(suspend_cause, new_cause_buf));
531 s->suspend_cause = suspend_cause;
538 pa_log_debug("%s: state: %s -> %s", s->name, pa_sink_state_to_string(s->state), pa_sink_state_to_string(state));
541 /* If we enter UNLINKED state, then we don't send change notifications.
542 * pa_sink_unlink() will send unlink notifications instead. */
543 if (state != PA_SINK_UNLINKED) {
544 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
545 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
549 if (suspending || resuming) {
553 /* We're suspending or resuming, tell everyone about it */
555 PA_IDXSET_FOREACH(i, s->inputs, idx)
556 if (s->state == PA_SINK_SUSPENDED &&
557 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
558 pa_sink_input_kill(i);
/* Inputs providing a suspend() hook are told about the new state. */
560 i->suspend(i, state == PA_SINK_SUSPENDED);
564 if ((suspending || resuming || suspend_cause_changed) && s->monitor_source && state != PA_SINK_UNLINKED)
565 pa_source_sync_suspend(s->monitor_source);
570 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/* Installs the driver hook used to read the hardware volume (the body is
 * largely missing from this excerpt). */
576 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
577 pa_sink_flags_t flags;
/* Installing/removing the hardware set_volume hook toggles
 * PA_SINK_HW_VOLUME_CTRL and re-derives decibel-volume support.
 * A write_volume hook must never exist without a set_volume hook. */
580 pa_assert(!s->write_volume || cb);
584 /* Save the current flags so we can tell if they've changed */
588 /* The sink implementor is responsible for setting decibel volume support */
589 s->flags |= PA_SINK_HW_VOLUME_CTRL;
591 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
592 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
593 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
596 /* If the flags have changed after init, let any clients know via a change event */
597 if (s->state != PA_SINK_INIT && flags != s->flags)
598 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Deferred ("write") volume hook: requires an existing set_volume hook
 * and toggles PA_SINK_DEFERRED_VOLUME. */
601 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
602 pa_sink_flags_t flags;
605 pa_assert(!cb || s->set_volume);
607 s->write_volume = cb;
609 /* Save the current flags so we can tell if they've changed */
613 s->flags |= PA_SINK_DEFERRED_VOLUME;
615 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
617 /* If the flags have changed after init, let any clients know via a change event */
618 if (s->state != PA_SINK_INIT && flags != s->flags)
619 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Hardware mute query hook (body largely missing from this excerpt). */
622 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
/* Hardware mute control hook: toggles PA_SINK_HW_MUTE_CTRL. */
628 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
629 pa_sink_flags_t flags;
635 /* Save the current flags so we can tell if they've changed */
639 s->flags |= PA_SINK_HW_MUTE_CTRL;
641 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
643 /* If the flags have changed after init, let any clients know via a change event */
644 if (s->state != PA_SINK_INIT && flags != s->flags)
645 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
648 static void enable_flat_volume(pa_sink *s, bool enable) {
649 pa_sink_flags_t flags;
/* Sets or clears PA_SINK_FLAT_VOLUME, but only if the user enabled flat
 * volumes globally (core->flat_volumes); posts a change event if the
 * flags changed after init. */
653 /* Always follow the overall user preference here */
654 enable = enable && s->core->flat_volumes;
656 /* Save the current flags so we can tell if they've changed */
660 s->flags |= PA_SINK_FLAT_VOLUME;
662 s->flags &= ~PA_SINK_FLAT_VOLUME;
664 /* If the flags have changed after init, let any clients know via a change event */
665 if (s->state != PA_SINK_INIT && flags != s->flags)
666 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Toggles PA_SINK_DECIBEL_VOLUME; enabling it also requests flat volume,
 * disabling it disables flat volume for this sink. */
669 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
670 pa_sink_flags_t flags;
674 /* Save the current flags so we can tell if they've changed */
678 s->flags |= PA_SINK_DECIBEL_VOLUME;
679 enable_flat_volume(s, true);
681 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
682 enable_flat_volume(s, false);
685 /* If the flags have changed after init, let any clients know via a change event */
686 if (s->state != PA_SINK_INIT && flags != s->flags)
687 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
690 /* Called from main context */
691 void pa_sink_put(pa_sink* s) {
/* Completes sink initialization after pa_sink_new() and the callback
 * setters: validates the flag/callback invariants, finalizes volume
 * state (including volume sharing with a master sink), moves the sink
 * from INIT to IDLE/SUSPENDED, puts the monitor source, and announces
 * the sink via subscription event and PUT hook. */
692 pa_sink_assert_ref(s);
693 pa_assert_ctl_context();
695 pa_assert(s->state == PA_SINK_INIT);
696 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
698 /* The following fields must be initialized properly when calling _put() */
699 pa_assert(s->asyncmsgq);
700 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
702 /* Generally, flags should be initialized via pa_sink_new(). As a
703 * special exception we allow some volume related flags to be set
704 * between _new() and _put() by the callback setter functions above.
706 * Thus we implement a couple safeguards here which ensure the above
707 * setters were used (or at least the implementor made manual changes
708 * in a compatible way).
710 * Note: All of these flags set here can change over the life time
712 pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
713 pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
714 pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
716 /* XXX: Currently decibel volume is disabled for all sinks that use volume
717 * sharing. When the master sink supports decibel volume, it would be good
718 * to have the flag also in the filter sink, but currently we don't do that
719 * so that the flags of the filter sink never change when it's moved from
720 * a master sink to another. One solution for this problem would be to
721 * remove user-visible volume altogether from filter sinks when volume
722 * sharing is used, but the current approach was easier to implement... */
723 /* We always support decibel volumes in software, otherwise we leave it to
724 * the sink implementor to set this flag as needed.
726 * Note: This flag can also change over the life time of the sink. */
727 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
728 pa_sink_enable_decibel_volume(s, true);
729 s->soft_volume = s->reference_volume;
732 /* If the sink implementor support DB volumes by itself, we should always
733 * try and enable flat volumes too */
734 if ((s->flags & PA_SINK_DECIBEL_VOLUME))
735 enable_flat_volume(s, true);
737 if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
738 pa_sink *root_sink = pa_sink_get_master(s);
740 pa_assert(root_sink);
/* Filter sinks with shared volume track their master's volumes, remapped
 * to this sink's channel map. */
742 s->reference_volume = root_sink->reference_volume;
743 pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
745 s->real_volume = root_sink->real_volume;
746 pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
748 /* We assume that if the sink implementor changed the default
749 * volume he did so in real_volume, because that is the usual
750 * place where he is supposed to place his changes. */
751 s->reference_volume = s->real_volume;
753 s->thread_info.soft_volume = s->soft_volume;
754 s->thread_info.soft_muted = s->muted;
755 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
/* Consistency checks between flags, volume state and the monitor source. */
757 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
758 || (s->base_volume == PA_VOLUME_NORM
759 && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
760 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
761 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
762 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
763 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
765 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
766 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
767 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
/* Enter SUSPENDED if a suspend cause was pre-set, otherwise IDLE. */
769 if (s->suspend_cause)
770 pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED, s->suspend_cause) == 0);
772 pa_assert_se(sink_set_state(s, PA_SINK_IDLE, 0) == 0);
774 pa_source_put(s->monitor_source);
776 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
777 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
779 /* This function must be called after the PA_CORE_HOOK_SINK_PUT hook,
780 * because module-switch-on-connect needs to know the old default sink */
781 pa_core_update_default_sink(s->core);
784 /* Called from main context */
785 void pa_sink_unlink(pa_sink* s) {
/* Detaches the sink from the core: fires the UNLINK hook, unregisters the
 * name, removes it from the core/card sets, kills all sink inputs, moves
 * the state to UNLINKED, unlinks the monitor source and announces the
 * removal. Idempotent via unlink_requested. NOTE(review): declaration of
 * 'linked' and some brace/else lines are missing from this excerpt. */
787 pa_sink_input *i, PA_UNUSED *j = NULL;
789 pa_sink_assert_ref(s);
790 pa_assert_ctl_context();
792 /* Please note that pa_sink_unlink() does more than simply
793 * reversing pa_sink_put(). It also undoes the registrations
794 * already done in pa_sink_new()! */
796 if (s->unlink_requested)
799 s->unlink_requested = true;
801 linked = PA_SINK_IS_LINKED(s->state);
804 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
806 if (s->state != PA_SINK_UNLINKED)
807 pa_namereg_unregister(s->core, s->name);
808 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
810 pa_core_update_default_sink(s->core);
813 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill every remaining input; each kill removes it from s->inputs. */
815 while ((i = pa_idxset_first(s->inputs, NULL))) {
817 pa_sink_input_kill(i);
822 sink_set_state(s, PA_SINK_UNLINKED, 0);
824 s->state = PA_SINK_UNLINKED;
828 if (s->monitor_source)
829 pa_source_unlink(s->monitor_source);
832 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
833 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
837 /* Called from main context */
838 static void sink_free(pa_object *o) {
/* Destructor installed as s->parent.parent.free in pa_sink_new(); runs
 * when the last reference is dropped on an unlinked sink. Releases the
 * monitor source, input sets, silence block, proplist, ports, and (under
 * TIZEN_PCM_DUMP) any open dump file. */
839 pa_sink *s = PA_SINK(o);
842 pa_assert_ctl_context();
843 pa_assert(pa_sink_refcnt(s) == 0);
844 pa_assert(!PA_SINK_IS_LINKED(s->state));
846 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
/* Drop any deferred-volume changes still queued. */
848 pa_sink_volume_change_flush(s);
850 if (s->monitor_source) {
851 pa_source_unref(s->monitor_source);
852 s->monitor_source = NULL;
855 pa_idxset_free(s->inputs, NULL);
856 pa_hashmap_free(s->thread_info.inputs);
858 if (s->silence.memblock)
859 pa_memblock_unref(s->silence.memblock);
865 pa_proplist_free(s->proplist);
868 pa_hashmap_free(s->ports);
870 #ifdef TIZEN_PCM_DUMP
871 /* close file for dump pcm */
872 if (s->pcm_dump_fp) {
873 fclose(s->pcm_dump_fp);
874 pa_log_info("%s closed", s->dump_path);
875 pa_xfree(s->dump_path);
876 s->pcm_dump_fp = NULL;
882 /* Called from main context, and not while the IO thread is active, please */
883 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
/* Stores the control->IO message queue and propagates it to the monitor
 * source (the assignment to s->asyncmsgq itself is not visible in this
 * excerpt). */
884 pa_sink_assert_ref(s);
885 pa_assert_ctl_context();
889 if (s->monitor_source)
890 pa_source_set_asyncmsgq(s->monitor_source, q);
893 /* Called from main context, and not while the IO thread is active, please */
894 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
/* Updates the bits of s->flags selected by 'mask' to 'value', logs and
 * broadcasts the change, mirrors the latency flags onto the monitor
 * source, and recurses into any filter sinks layered on top of this one. */
895 pa_sink_flags_t old_flags;
896 pa_sink_input *input;
899 pa_sink_assert_ref(s);
900 pa_assert_ctl_context();
902 /* For now, allow only a minimal set of flags to be changed. */
903 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
905 old_flags = s->flags;
906 s->flags = (s->flags & ~mask) | (value & mask);
908 if (s->flags == old_flags)
911 if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
912 pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
914 if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
915 pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
916 s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
918 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
919 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
/* Translate the sink latency flags to the equivalent source flags. */
921 if (s->monitor_source)
922 pa_source_update_flags(s->monitor_source,
923 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
924 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
925 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
926 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
/* Filter sinks (origin_sink of an input) inherit the same flag change. */
928 PA_IDXSET_FOREACH(input, s->inputs, idx) {
929 if (input->origin_sink)
930 pa_sink_update_flags(input->origin_sink, mask, value);
934 /* Called from IO context, or before _put() from main context */
935 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
/* Stores the realtime-poll object for the IO thread and shares it with
 * the monitor source. */
936 pa_sink_assert_ref(s);
937 pa_sink_assert_io_context(s);
939 s->thread_info.rtpoll = p;
941 if (s->monitor_source)
942 pa_source_set_rtpoll(s->monitor_source, p);
945 /* Called from main context */
946 int pa_sink_update_status(pa_sink*s) {
/* Re-derives RUNNING vs IDLE from whether the sink currently has users;
 * leaves a SUSPENDED sink alone. */
947 pa_sink_assert_ref(s);
948 pa_assert_ctl_context();
949 pa_assert(PA_SINK_IS_LINKED(s->state));
951 if (s->state == PA_SINK_SUSPENDED)
954 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
957 /* Called from any context - must be threadsafe */
958 void pa_sink_set_mixer_dirty(pa_sink *s, bool is_dirty) {
/* Atomically records that the hardware mixer needs re-synchronization. */
959 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
962 /* Called from main context */
963 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
/* Adds or removes 'cause' from the sink's suspend-cause mask and moves
 * the sink to SUSPENDED (if any cause remains) or RUNNING/IDLE. While the
 * mixer is accessible again (session no longer suspended) and marked
 * dirty, re-applies the active port to the hardware first. */
964 pa_suspend_cause_t merged_cause;
966 pa_sink_assert_ref(s);
967 pa_assert_ctl_context();
968 pa_assert(PA_SINK_IS_LINKED(s->state));
969 pa_assert(cause != 0);
/* Fold the new cause into (or out of) the existing cause mask. */
972 merged_cause = s->suspend_cause | cause;
974 merged_cause = s->suspend_cause & ~cause;
976 if (!(merged_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
977 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
978 it'll be handled just fine. */
979 pa_sink_set_mixer_dirty(s, false);
980 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
981 if (s->active_port && s->set_port) {
/* With deferred volume the port change must go through the IO thread. */
982 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
983 struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
984 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
987 s->set_port(s, s->active_port);
998 return sink_set_state(s, PA_SINK_SUSPENDED, merged_cause);
1000 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
1003 /* Called from main context */
1004 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
/* Begins moving every input away from this sink: each input that accepts
 * start_move is referenced and pushed onto 'q' for a later finish/fail.
 * NOTE(review): the allocation of 'q' when NULL, the 'idx' declaration and
 * the return statement are missing from this excerpt. */
1005 pa_sink_input *i, *n;
1008 pa_sink_assert_ref(s);
1009 pa_assert_ctl_context();
1010 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the next input before touching the current one, because
 * start_move removes it from s->inputs. */
1015 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
1016 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
1018 pa_sink_input_ref(i);
1020 if (pa_sink_input_start_move(i) >= 0)
1021 pa_queue_push(q, i);
1023 pa_sink_input_unref(i);
1029 /* Called from main context */
1030 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
/* Completes a move started by pa_sink_move_all_start(): re-attaches each
 * still-linked input to sink 's' (failing it on error), drops the
 * reference taken at start, then frees the queue. */
1033 pa_sink_assert_ref(s);
1034 pa_assert_ctl_context();
1035 pa_assert(PA_SINK_IS_LINKED(s->state));
1038 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1039 if (PA_SINK_INPUT_IS_LINKED(i->state)) {
1040 if (pa_sink_input_finish_move(i, s, save) < 0)
1041 pa_sink_input_fail_move(i);
1044 pa_sink_input_unref(i);
1047 pa_queue_free(q, NULL);
1050 /* Called from main context */
1051 void pa_sink_move_all_fail(pa_queue *q) {
/* Aborts a pending move: fails every queued input, drops its reference,
 * then frees the queue. */
1054 pa_assert_ctl_context();
1057 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1058 pa_sink_input_fail_move(i);
1059 pa_sink_input_unref(i);
1062 pa_queue_free(q, NULL);
1065 /* Called from IO thread context */
1066 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
/* Scans all attached inputs for underruns, recursing through filter sinks
 * (origin_sink), and returns how much of 'left_to_play' remains after the
 * longest observed underrun. NOTE(review): the declarations of 'i',
 * 'state' and 'result', and some branch lines, are missing from this
 * excerpt. */
1071 pa_sink_assert_ref(s);
1072 pa_sink_assert_io_context(s);
1074 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
/* Bytes the sink has consumed since this input last delivered data. */
1075 size_t uf = i->thread_info.underrun_for_sink;
1077 /* Propagate down the filter tree */
1078 if (i->origin_sink) {
1079 size_t filter_result, left_to_play_origin;
1081 /* The recursive call works in the origin sink domain ... */
1082 left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
1084 /* .. and returns the time to sleep before waking up. We need the
1085 * underrun duration for comparisons, so we undo the subtraction on
1086 * the return value... */
1087 filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
1089 /* ... and convert it back to the master sink domain */
1090 filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
1092 /* Remember the longest underrun so far */
1093 if (filter_result > result)
1094 result = filter_result;
1098 /* No underrun here, move on */
1100 } else if (uf >= left_to_play) {
1101 /* The sink has possibly consumed all the data the sink input provided */
1102 pa_sink_input_process_underrun(i);
1103 } else if (uf > result) {
1104 /* Remember the longest underrun so far */
1110 pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
1111 (long) result, (long) left_to_play - result);
1112 return left_to_play - result;
/* Called from IO thread context */
/* Executes a previously requested rewind of 'nbytes' bytes: resets the
 * rewind bookkeeping, rewinds any deferred volume changes and the Tizen
 * PCM dump position, then propagates the rewind to all inputs and to the
 * monitor source. */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    /* If nobody requested this and this is actually no real rewind
     * then we can short cut this. Please note that this means that
     * not all rewind requests triggered upstream will always be
     * translated in actual requests! */
    if (!s->thread_info.rewind_requested && nbytes <= 0)
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = false;
    pa_log_debug("Processing rewind...");
    if (s->flags & PA_SINK_DEFERRED_VOLUME)
        pa_sink_volume_change_rewind(s, nbytes);
#ifdef TIZEN_PCM_DUMP
    /* Move the dump file position back so the dump matches what will
     * actually be played after the rewind */
    fseeko(s->pcm_dump_fp, (off_t)nbytes * (-1), SEEK_CUR);
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_process_rewind(i, nbytes);
    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_process_rewind(s->monitor_source, nbytes);
/* Called from IO thread context */
/* Peeks one chunk from every non-silent input into 'info', up to
 * 'maxinfo' entries. On return *length is clamped to the shortest chunk
 * obtained so all entries can be mixed over the same span. Each used
 * entry holds a reference to its input (in userdata) and to its memblock;
 * both are released later by inputs_drop(). Returns the number of
 * pa_mix_info entries filled. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    size_t mixlength = *length;
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;
        /* Pure silence need not be mixed in explicitly */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
        /* Keep the input alive until inputs_drop() runs */
        info->userdata = pa_sink_input_ref(i);
        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);
    *length = mixlength;
/* Called from IO thread context */
/* Consumes result->length bytes from every input after a render pass:
 * drops the read data from each input, posts per-input audio to any
 * direct outputs of the monitor source (applying the per-input mix volume
 * where one was recorded), releases the references and memblocks taken by
 * fill_mix_info(), and finally posts the mixed 'result' to the monitor
 * source. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    unsigned n_unreffed = 0;
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);
    /* We optimize for the case where the order of the inputs has not changed */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_mix_info* m = NULL;
        pa_sink_input_assert_ref(i);
        /* Let's try to find the matching entry info the pa_mix_info array */
        for (j = 0; j < n; j ++) {
            if (info[p].userdata == i) {
        /* Drop read data */
        pa_sink_input_drop(i, result->length);
        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                /* Direct outputs get this input's own chunk (with its mix
                 * volume applied), not the fully mixed result */
                if (m && m->chunk.memblock) {
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                pa_memblock_ref(c.memblock);
                pa_assert(result->length <= c.length);
                c.length = result->length;
                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                pa_memblock_unref(c.memblock);
        /* Release what fill_mix_info() acquired for this entry */
        if (m->chunk.memblock) {
            pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);
        pa_sink_input_unref(m->userdata);
    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */
    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
/* Called from IO thread context */
/* Renders up to 'length' bytes of mixed audio from all inputs into
 * *result; the caller owns the returned memblock reference. With no
 * inputs silence is returned; with exactly one input its chunk is reused
 * (volume-adjusted) without a mix pass; otherwise pa_mix() combines all
 * peeked chunks into a fresh block. Consumed data is then dropped from
 * the inputs via inputs_drop(). result->length may end up shorter than
 * 'length'. */
void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    size_t block_size_max;
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);
    /* A suspended sink simply hands out silence */
    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        result->memblock = pa_memblock_ref(s->silence.memblock);
        result->index = s->silence.index;
        result->length = PA_MIN(s->silence.length, length);
    /* presumably the length==0 fallback: render one page worth — TODO confirm */
    length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
    /* Never request more than a single memblock can hold */
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);
    pa_assert(length > 0);
    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
    /* No live inputs: return (a slice of) the cached silence block */
    *result = s->silence;
    pa_memblock_ref(result->memblock);
    if (result->length > length)
        result->length = length;
} else if (n == 1) {
    /* Exactly one input: reuse its chunk instead of running the mixer */
    *result = info[0].chunk;
    pa_memblock_ref(result->memblock);
    if (result->length > length)
        result->length = length;
    pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
    if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
        pa_memblock_unref(result->memblock);
        pa_silence_memchunk_get(&s->core->silence_cache,
    } else if (!pa_cvolume_is_norm(&volume)) {
        /* Need a private copy before scaling in place */
        pa_memchunk_make_writable(result, 0);
        pa_volume_memchunk(result, &s->sample_spec, &volume);
    /* General case: allocate a fresh block and mix all inputs into it */
    result->memblock = pa_memblock_new(s->core->mempool, length);
    ptr = pa_memblock_acquire(result->memblock);
    result->length = pa_mix(info, n,
                            &s->thread_info.soft_volume,
                            s->thread_info.soft_muted);
    pa_memblock_release(result->memblock);
    inputs_drop(s, info, n, result);
#ifdef TIZEN_PCM_DUMP
    pa_sink_write_pcm_dump(s, result);
/* Called from IO thread context */
/* Like pa_sink_render(), but mixes into the caller-provided chunk
 * 'target' instead of allocating a new block. target->length may be
 * shortened if less data was available. A suspended sink or no inputs
 * yields silence; a single input is copied (volume-adjusted) without a
 * mix pass; otherwise pa_mix() writes directly into target's memblock. */
void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    size_t length, block_size_max;
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);
    /* A suspended sink produces silence */
    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
    length = target->length;
    /* Never request more than a single memblock can hold */
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);
    pa_assert(length > 0);
    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
    /* No live inputs: fill the target with silence */
    if (target->length > length)
        target->length = length;
    pa_silence_memchunk(target, &s->sample_spec);
} else if (n == 1) {
    /* Exactly one input: copy its (volume-adjusted) chunk */
    if (target->length > length)
        target->length = length;
    pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
    if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
        pa_silence_memchunk(target, &s->sample_spec);
    vchunk = info[0].chunk;
    pa_memblock_ref(vchunk.memblock);
    if (vchunk.length > length)
        vchunk.length = length;
    if (!pa_cvolume_is_norm(&volume)) {
        /* Private copy before scaling in place */
        pa_memchunk_make_writable(&vchunk, 0);
        pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
    pa_memchunk_memcpy(target, &vchunk);
    pa_memblock_unref(vchunk.memblock);
    /* General case: mix all inputs straight into the target block */
    ptr = pa_memblock_acquire(target->memblock);
    target->length = pa_mix(info, n,
                            (uint8_t*) ptr + target->index, length,
                            &s->thread_info.soft_volume,
                            s->thread_info.soft_muted);
    pa_memblock_release(target->memblock);
    inputs_drop(s, info, n, target);
#ifdef TIZEN_PCM_DUMP
    pa_sink_write_pcm_dump(s, target);
/* Called from IO thread context */
/* Like pa_sink_render_into(), but guarantees the whole 'target' chunk is
 * filled, calling pa_sink_render_into() repeatedly until no bytes remain.
 * A suspended sink yields a fully silenced target. */
void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);
    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
    /* Render one slice of the remaining span per iteration */
    pa_sink_render_into(s, &chunk);
1509 /* Called from IO thread context */
1510 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1511 pa_sink_assert_ref(s);
1512 pa_sink_assert_io_context(s);
1513 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1514 pa_assert(length > 0);
1515 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1518 pa_assert(!s->thread_info.rewind_requested);
1519 pa_assert(s->thread_info.rewind_nbytes == 0);
1523 pa_sink_render(s, length, result);
1525 if (result->length < length) {
1528 pa_memchunk_make_writable(result, length);
1530 chunk.memblock = result->memblock;
1531 chunk.index = result->index + result->length;
1532 chunk.length = length - result->length;
1534 pa_sink_render_into_full(s, &chunk);
1536 result->length = length;
/* Called from main thread */
/* Tries to reconfigure the sink to the sample spec 'spec' (only format
 * and rate are currently considered), e.g. when a passthrough stream
 * connects. Chooses between the default and alternate rates to minimize
 * resampling effort unless resampling avoidance is in effect, suspends
 * the sink while the driver's reconfigure() callback runs, updates the
 * monitor source accordingly, and asks corked inputs to refresh their
 * resamplers. Returns < 0 if the reconfiguration cannot be done. */
int pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
    pa_sample_spec desired_spec;
    uint32_t default_rate = s->default_sample_rate;
    uint32_t alternate_rate = s->alternate_sample_rate;
    bool default_rate_is_usable = false;
    bool alternate_rate_is_usable = false;
    /* NOTE(review): the two initializers below come from alternate build
     * configurations (per-sink vs. core-wide avoid_resampling) — confirm
     * against the surrounding #ifdefs. */
    bool avoid_resampling = s->avoid_resampling;
    bool avoid_resampling = s->core->avoid_resampling;
    /* We currently only try to reconfigure the sample rate */
    if (pa_sample_spec_equal(spec, &s->sample_spec))
    if (!s->reconfigure)
    if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
        pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
    /* A running sink cannot be reconfigured without audible disruption */
    if (PA_SINK_IS_RUNNING(s->state)) {
        pa_log_info("Cannot update sample spec, SINK_IS_RUNNING, will keep using %s and %u Hz",
                    pa_sample_format_to_string(s->sample_spec.format), s->sample_spec.rate);
        pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
                    s->sample_spec.rate);
    if (s->monitor_source) {
        if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
            pa_log_info("Cannot update sample spec, monitor source is RUNNING");
            pa_log_info("Cannot update rate, monitor source is RUNNING");
    if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
    desired_spec = s->sample_spec;
    if (!avoid_resampling) {
        default_rate = alternate_rate = s->selected_sample_rate;
        desired_spec.format = s->selected_sample_format;
        /* We have to try to use the sink input format and rate */
        desired_spec.format = spec->format;
        desired_spec.rate = spec->rate;
        /* We have to try to use the sink input rate */
        desired_spec.rate = spec->rate;
    } else if (avoid_resampling && (spec->format != s->sample_spec.format ||
               spec->rate >= default_rate || spec->rate >= alternate_rate)) {
        /* Adopt the stream's format/rate to avoid resampling entirely */
        desired_spec.format = spec->format;
    } else if (avoid_resampling && (spec->rate >= default_rate || spec->rate >= alternate_rate)) {
        /* We just try to set the sink input's sample rate if it's not too low */
        desired_spec.rate = spec->rate;
    } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
        /* We can directly try to use this rate */
        desired_spec.rate = spec->rate;
        /* See if we can pick a rate that results in less resampling effort */
        if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
            default_rate_is_usable = true;
        if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
            default_rate_is_usable = true;
        if (alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
            alternate_rate_is_usable = true;
        if (alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
            alternate_rate_is_usable = true;
        if (alternate_rate_is_usable && !default_rate_is_usable)
            desired_spec.rate = alternate_rate;
            desired_spec.rate = default_rate;
    if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_sink_is_passthrough(s))
        pa_log_info("desired spec is same as sink->sample_spec");
    /* Streams are attached: reconfiguring would disturb them */
    if (!passthrough && pa_sink_used_by(s) > 0)
    pa_log_debug("Suspending sink %s due to changing format, desired format = %s rate = %u",
                 s->name, pa_sample_format_to_string(desired_spec.format), desired_spec.rate);
    pa_log_debug("Suspending sink %s due to changing format", s->name);
    pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
    if (s->reconfigure(s, &desired_spec, passthrough) >= 0) {
        /* update monitor source as well */
        if (s->monitor_source && !passthrough)
            pa_source_reconfigure(s->monitor_source, &s->sample_spec, false);
        pa_log_info("Reconfigured successfully");
            pa_source_reconfigure(s->monitor_source, &desired_spec, false);
        pa_log_info("Changed format successfully");
        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            /* Only corked inputs can safely rebuild their resamplers here */
            if (i->state == PA_SINK_INPUT_CORKED)
                pa_sink_input_update_resampler(i);
                pa_sink_input_update_rate(i);
    pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
/* Called from main thread */
/* Returns the sink's current playback latency in usec, queried
 * synchronously from the IO thread and adjusted by the port latency
 * offset. Suspended sinks and sinks without PA_SINK_LATENCY report
 * early (fallback value not visible in this excerpt). */
pa_usec_t pa_sink_get_latency(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    /* The returned value is supposed to be in the time domain of the sound card! */
    if (s->state == PA_SINK_SUSPENDED)
    if (!(s->flags & PA_SINK_LATENCY))
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
    /* the return value is unsigned, so check that the offset can be added to
     * usec without the sum going negative */
    if (-s->port_latency_offset <= usec)
        usec += s->port_latency_offset;
    return (pa_usec_t)usec;
/* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): queries the latency via the
 * message handler directly (no cross-thread round trip) and applies the
 * thread-local port latency offset. If 'allow_negative' is false the
 * result is clamped to non-negative values. */
int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    /* The returned value is supposed to be in the time domain of the sound card! */
    if (s->thread_info.state == PA_SINK_SUSPENDED)
    if (!(s->flags & PA_SINK_LATENCY))
    o = PA_MSGOBJECT(s);
    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
    o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
    /* If allow_negative is false, the call should only return positive values, */
    usec += s->thread_info.port_latency_offset;
    if (!allow_negative && usec < 0)
1755 /* Called from the main thread (and also from the IO thread while the main
1756 * thread is waiting).
1758 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1759 * set. Instead, flat volume mode is detected by checking whether the root sink
1760 * has the flag set. */
1761 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1762 pa_sink_assert_ref(s);
1764 s = pa_sink_get_master(s);
1767 return (s->flags & PA_SINK_FLAT_VOLUME);
1772 /* Called from the main thread (and also from the IO thread while the main
1773 * thread is waiting). */
1774 pa_sink *pa_sink_get_master(pa_sink *s) {
1775 pa_sink_assert_ref(s);
1777 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1778 if (PA_UNLIKELY(!s->input_to_master))
1781 s = s->input_to_master->sink;
1787 /* Called from main context */
1788 bool pa_sink_is_filter(pa_sink *s) {
1789 pa_sink_assert_ref(s);
1791 return (s->input_to_master != NULL);
1794 /* Called from main context */
1795 bool pa_sink_is_passthrough(pa_sink *s) {
1796 pa_sink_input *alt_i;
1799 pa_sink_assert_ref(s);
1801 /* one and only one PASSTHROUGH input can possibly be connected */
1802 if (pa_idxset_size(s->inputs) == 1) {
1803 alt_i = pa_idxset_first(s->inputs, &idx);
1805 if (pa_sink_input_is_passthrough(alt_i))
/* Called from main context */
/* Puts the PA core objects into passthrough mode: suspends the monitor
 * source and forces the sink volume to at most 0 dB, saving the previous
 * volume so pa_sink_leave_passthrough() can restore it. */
void pa_sink_enter_passthrough(pa_sink *s) {
    /* The sink implementation is reconfigured for passthrough in
     * pa_sink_reconfigure(). This function sets the PA core objects to
     * passthrough mode. */
    /* disable the monitor in passthrough mode */
    if (s->monitor_source) {
        pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
        pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
    /* set the volume to NORM */
    s->saved_volume = *pa_sink_get_volume(s, true);
    s->saved_save_volume = s->save_volume;
    /* Clamp to the hardware base volume so passthrough never exceeds 0 dB */
    pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
    pa_sink_set_volume(s, &volume, true, false);
    pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
/* Called from main context */
/* Reverts pa_sink_enter_passthrough(): resumes the monitor source and
 * restores the sink volume (and its save flag) captured when passthrough
 * mode was entered, then clears the saved state. */
void pa_sink_leave_passthrough(pa_sink *s) {
    /* Unsuspend monitor */
    if (s->monitor_source) {
        pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
        pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
    /* Restore sink volume to what it was before we entered passthrough mode */
    pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
    pa_cvolume_init(&s->saved_volume);
    s->saved_save_volume = false;
/* Called from main context. */
/* Recomputes i->reference_ratio = i->volume / i->sink->reference_volume
 * per channel (in the input's channel map) and applies it via
 * pa_sink_input_set_reference_ratio(). Channels where the remapped sink
 * volume is muted keep their previous ratio, as do channels whose
 * current ratio already reproduces i->volume exactly. */
static void compute_reference_ratio(pa_sink_input *i) {
    pa_cvolume remapped;
    pa_assert(pa_sink_flat_volume_enabled(i->sink));
    /*
     * Calculates the reference ratio from the sink's reference
     * volume. This basically calculates:
     *
     * i->reference_ratio = i->volume / i->sink->reference_volume
     */
    remapped = i->sink->reference_volume;
    pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
    ratio = i->reference_ratio;
    for (c = 0; c < i->sample_spec.channels; c++) {
        /* We don't update when the sink volume is 0 anyway */
        if (remapped.values[c] <= PA_VOLUME_MUTED)
        /* Don't update the reference ratio unless necessary */
        if (pa_sw_volume_multiply(
                remapped.values[c]) == i->volume.values[c])
        ratio.values[c] = pa_sw_volume_divide(
            i->volume.values[c],
            remapped.values[c]);
    pa_sink_input_set_reference_ratio(i, &ratio);
1893 /* Called from main context. Only called for the root sink in volume sharing
1894 * cases, except for internal recursive calls. */
1895 static void compute_reference_ratios(pa_sink *s) {
1899 pa_sink_assert_ref(s);
1900 pa_assert_ctl_context();
1901 pa_assert(PA_SINK_IS_LINKED(s->state));
1902 pa_assert(pa_sink_flat_volume_enabled(s));
1904 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1905 compute_reference_ratio(i);
1907 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1908 && PA_SINK_IS_LINKED(i->origin_sink->state))
1909 compute_reference_ratios(i->origin_sink);
/* Called from main context. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
/* Recomputes, for every input of 's':
 *   i->real_ratio  := i->volume / s->real_volume
 *   i->soft_volume := i->real_ratio * i->volume_factor
 * Volume-sharing filter inputs are special-cased to a 0 dB real ratio and
 * handled recursively. The caller is responsible for pushing soft_volume
 * to the IO-thread copy. */
static void compute_real_ratios(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume remapped;
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin sink uses volume sharing, so this input's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result i->soft_volume must equal i->volume_factor. */
            pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
            i->soft_volume = i->volume_factor;
            if (PA_SINK_IS_LINKED(i->origin_sink->state))
                compute_real_ratios(i->origin_sink);
        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */
        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
        i->real_ratio.channels = i->sample_spec.channels;
        i->soft_volume.channels = i->sample_spec.channels;
        for (c = 0; c < i->sample_spec.channels; c++) {
            /* Muted sink channel: silence the stream on that channel too */
            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave i->real_ratio untouched */
                i->soft_volume.values[c] = PA_VOLUME_MUTED;
            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    remapped.values[c]) != i->volume.values[c])
                i->real_ratio.values[c] = pa_sw_volume_divide(
                    i->volume.values[c],
                    remapped.values[c]);
            i->soft_volume.values[c] = pa_sw_volume_multiply(
                i->real_ratio.values[c],
                i->volume_factor.values[c]);
        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
/* Remaps a volume from channel map 'from' to 'to' while minimizing the
 * impact on other streams: if 'template' is already a valid remapping of
 * the input volume it is used verbatim; otherwise every channel is set to
 * the maximum value so a change on one stream can always be compensated
 * for in another stream with a different channel map. Returns the
 * remapped volume. */
static pa_cvolume *cvolume_remap_minimal_impact(
        const pa_cvolume *template,
        const pa_channel_map *from,
        const pa_channel_map *to) {
    pa_assert(template);
    pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
    pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
    /* Much like pa_cvolume_remap(), but tries to minimize impact when
     * mapping from sink input to sink volumes:
     *
     * If template is a possible remapping from v it is used instead
     * of remapping anew.
     *
     * If the channel maps don't match we set an all-channel volume on
     * the sink to ensure that changing a volume on one stream has no
     * effect that cannot be compensated for in another stream that
     * does not have the same channel map as the sink. */
    if (pa_channel_map_equal(from, to))
    /* Template round-trips back to v, so it is a faithful remapping */
    if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
    /* Fallback: flatten to the per-channel maximum */
    pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
/* Merges into *max_volume the per-channel maximum of all input volumes of
 * 's' (remapped to 'channel_map' with minimal cross-stream impact),
 * recursing into volume-sharing filter sinks. Used to derive the sink's
 * real volume in flat volume mode. */
static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
    pa_sink_assert_ref(s);
    pa_assert(max_volume);
    pa_assert(channel_map);
    pa_assert(pa_sink_flat_volume_enabled(s));
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume remapped;
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            if (PA_SINK_IS_LINKED(i->origin_sink->state))
                get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
            /* Ignore this input. The origin sink uses volume sharing, so this
             * input's volume will be set to be equal to the root sink's real
             * volume. Obviously this input's current volume must not then
             * affect what the root sink's real volume will be. */
        remapped = i->volume;
        cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
        pa_cvolume_merge(max_volume, max_volume, &remapped);
2051 /* Called from main thread. Only called for the root sink in volume sharing
2052 * cases, except for internal recursive calls. */
2053 static bool has_inputs(pa_sink *s) {
2057 pa_sink_assert_ref(s);
2059 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2060 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
/* Sets s->real_volume to *new_volume (remapped from 'channel_map' to the
 * sink's map) and propagates it down the filter tree: volume-sharing
 * filter inputs follow the root's real volume directly (with their
 * reference ratios recomputed in flat volume mode), and their origin
 * sinks are updated recursively. */
static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_sink_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);
    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_sink_flat_volume_enabled(s)) {
                pa_cvolume new_input_volume;
                /* Follow the root sink's real volume. */
                new_input_volume = *new_volume;
                pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
                pa_sink_input_set_volume_direct(i, &new_input_volume);
                compute_reference_ratio(i);
            if (PA_SINK_IS_LINKED(i->origin_sink->state))
                update_real_volume(i->origin_sink, new_volume, channel_map);
/* Called from main thread. Only called for the root sink in shared volume
 * cases. */
/* Derives s->real_volume in flat volume mode as the per-channel maximum
 * of all input volumes, then updates the real ratios/soft volumes of all
 * inputs to match. With no inputs the reference volume is used as-is. */
static void compute_real_volume(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */
    if (!has_inputs(s)) {
        /* In the special case that we have no sink inputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);
    /* Start from silence and merge every input's volume on top */
    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
    /* First let's determine the new maximum volume of all inputs
     * connected to this sink */
    get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);
    /* Then, let's update the real ratios/soft volumes of all inputs
     * connected to this sink */
    compute_real_ratios(s);
/* Called from main thread. Only called for the root sink in shared volume
 * cases, except for internal recursive calls. */
/* After a sink (not sink-input) volume change, recomputes every input's
 * volume as reference_volume * reference_ratio, recursing into
 * volume-sharing filter sinks. Volume-sharing filter inputs themselves
 * are skipped here; they are synchronized by update_real_volume(). */
static void propagate_reference_volume(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));
    /* This is called whenever the sink volume changes that is not
     * caused by a sink input volume change. We need to fix up the
     * sink input volumes accordingly */
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume new_volume;
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            if (PA_SINK_IS_LINKED(i->origin_sink->state))
                propagate_reference_volume(i->origin_sink);
            /* Since the origin sink uses volume sharing, this input's volume
             * needs to be updated to match the root sink's real volume, but
             * that will be done later in update_real_volume(). */
        /* This basically calculates:
         *
         * i->volume := s->reference_volume * i->reference_ratio */
        new_volume = s->reference_volume;
        pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
        pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
        pa_sink_input_set_volume_direct(i, &new_volume);
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed. */
/* Sets the sink's reference volume to *v (remapped from 'channel_map'),
 * updates the save flag, and recurses into volume-sharing filter sinks so
 * the whole tree stays consistent. */
static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
    bool reference_volume_changed;
    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));
    pa_cvolume_remap(&volume, channel_map, &s->channel_map);
    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    pa_sink_set_reference_volume_direct(s, &volume);
    /* Keep an earlier save request alive even when nothing changed now */
    s->save_volume = (!reference_volume_changed && s->save_volume) || save;
    if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        /* If the root sink's volume doesn't change, then there can't be any
         * changes in the other sinks in the sink tree either.
         *
         * It's probably theoretically possible that even if the root sink's
         * volume changes slightly, some filter sink doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root sink volume to the sinks connected to the
         * intermediate sink that didn't change its volume. This theoretical
         * possibility is the reason why we have that !(s->flags &
         * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we returned here false always if
         * reference_volume_changed is false. */
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
                && PA_SINK_IS_LINKED(i->origin_sink->state))
            update_reference_volume(i->origin_sink, v, channel_map, false);
/* Called from main thread */
/* Sets the sink volume. With a non-NULL 'volume' the reference volume of
 * the root sink is updated and, in flat volume mode, propagated to all
 * inputs; with volume == NULL (flat volume only) the sink's reference and
 * real volumes are re-synchronized from the current stream volumes.
 * Passthrough sinks only accept a 0 dB reset. 'save' requests persistent
 * storage of the new volume; 'send_msg' (parameter not visible in this
 * excerpt — TODO confirm) controls notification of the IO thread. */
void pa_sink_set_volume(
        const pa_cvolume *volume,
    pa_cvolume new_reference_volume;
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_sink_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
    /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
     * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
    if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
        pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
    /* In case of volume sharing, the volume is set for the root sink first,
     * from which it's then propagated to the sharing sinks. */
    root_sink = pa_sink_get_master(s);
    if (PA_UNLIKELY(!root_sink))
    /* As a special exception we accept mono volumes on all sinks --
     * even on those with more complex channel maps */
    if (pa_cvolume_compatible(volume, &s->sample_spec))
        new_reference_volume = *volume;
    /* Mono volume: scale the current reference volume to the new level */
    new_reference_volume = s->reference_volume;
    pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
    pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
    if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
        if (pa_sink_flat_volume_enabled(root_sink)) {
            /* OK, propagate this volume change back to the inputs */
            propagate_reference_volume(root_sink);
            /* And now recalculate the real volume */
            compute_real_volume(root_sink);
            update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
    /* If volume is NULL we synchronize the sink's real and
     * reference volumes with the stream volumes. */
    pa_assert(pa_sink_flat_volume_enabled(root_sink));
    /* Ok, let's determine the new real volume */
    compute_real_volume(root_sink);
    /* Let's 'push' the reference volume if necessary */
    pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
    /* If the sink and its root don't have the same number of channels, we need to remap */
    if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
    update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
    /* Now that the reference volume is updated, we can update the streams'
     * reference ratios. */
    compute_reference_ratios(root_sink);
    if (root_sink->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_sink->soft_volume */
        pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
        if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
            root_sink->set_volume(root_sink);
        /* If we have no function set_volume(), then the soft volume
         * becomes the real volume */
        root_sink->soft_volume = root_sink->real_volume;
    /* This tells the sink that soft volume and/or real volume changed */
    pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2310 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2311 * Only to be called by sink implementor */
/* Set the software (post-mixing, applied-in-PA) volume of the sink.
 * volume == NULL resets the soft volume to norm (0 dB) for all channels.
 * NOTE(review): listing elides some lines here (else branches); verify
 * against the full source before relying on exact control flow. */
2312 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2314 pa_sink_assert_ref(s);
/* Sinks sharing volume with their master have no soft volume of their own. */
2315 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* With deferred volume the call must come from the IO thread; otherwise
 * it must come from the control (main) thread. */
2317 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2318 pa_sink_assert_io_context(s);
2320 pa_assert_ctl_context();
2323 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2325 s->soft_volume = *volume;
/* If linked and not deferred, push the new soft volume to the IO thread
 * synchronously via the async message queue. */
2327 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2328 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2330 s->thread_info.soft_volume = s->soft_volume;
2333 /* Called from the main thread. Only called for the root sink in volume sharing
2334 * cases, except for internal recursive calls. */
/* React to an externally-caused hardware volume change: adopt the new real
 * volume as the reference volume and rebuild stream volumes from the
 * (unchanged) per-input real ratios. Recurses into origin sinks of
 * volume-sharing inputs. */
2335 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2339 pa_sink_assert_ref(s);
2340 pa_assert(old_real_volume);
2341 pa_assert_ctl_context();
2342 pa_assert(PA_SINK_IS_LINKED(s->state));
2344 /* This is called when the hardware's real volume changes due to
2345 * some external event. We copy the real volume into our
2346 * reference volume and then rebuild the stream volumes based on
2347 * i->real_ratio which should stay fixed. */
2349 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* Nothing to do if the hardware volume did not actually change.
 * NOTE(review): the early return after this check is elided in this
 * listing. */
2350 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2353 /* 1. Make the real volume the reference volume */
2354 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2357 if (pa_sink_flat_volume_enabled(s)) {
2359 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2360 pa_cvolume new_volume;
2362 /* 2. Since the sink's reference and real volumes are equal
2363 * now our ratios should be too. */
2364 pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2366 /* 3. Recalculate the new stream reference volume based on the
2367 * reference ratio and the sink's reference volume.
2369 * This basically calculates:
2371 * i->volume = s->reference_volume * i->reference_ratio
2373 * This is identical to propagate_reference_volume() */
2374 new_volume = s->reference_volume;
2375 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2376 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2377 pa_sink_input_set_volume_direct(i, &new_volume);
/* Recurse into filter sinks that share volume with us. */
2379 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2380 && PA_SINK_IS_LINKED(i->origin_sink->state))
2381 propagate_real_volume(i->origin_sink, old_real_volume);
2385 /* Something got changed in the hardware. It probably makes sense
2386 * to save changed hw settings given that hw volume changes not
2387 * triggered by PA are almost certainly done by the user. */
2388 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2389 s->save_volume = true;
2392 /* Called from io thread */
/* Ask the main thread (asynchronously, no reply expected) to re-read the
 * hardware volume and mute state of this sink. */
2393 void pa_sink_update_volume_and_mute(pa_sink *s) {
2395 pa_sink_assert_io_context(s);
2397 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2400 /* Called from main thread */
/* Return the sink's reference volume, optionally refreshing the real volume
 * from the hardware first (when the sink wants refreshes, or when forced). */
2401 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
2402 pa_sink_assert_ref(s);
2403 pa_assert_ctl_context();
2404 pa_assert(PA_SINK_IS_LINKED(s->state));
2406 if (s->refresh_volume || force_refresh) {
2407 struct pa_cvolume old_real_volume;
2409 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2411 old_real_volume = s->real_volume;
/* Non-deferred volume: query the driver directly on this thread.
 * NOTE(review): the deferred-volume branch (message send) appears on the
 * following line; an else/branch line is elided in this listing. */
2413 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2416 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2418 update_real_volume(s, &s->real_volume, &s->channel_map);
2419 propagate_real_volume(s, &old_real_volume);
2422 return &s->reference_volume;
2425 /* Called from main thread. In volume sharing cases, only the root sink may
/* Entry point for sink implementors to report an out-of-band hardware
 * volume change; updates the real volume and propagates it to streams. */
2427 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2428 pa_cvolume old_real_volume;
2430 pa_sink_assert_ref(s);
2431 pa_assert_ctl_context();
2432 pa_assert(PA_SINK_IS_LINKED(s->state));
2433 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2435 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2437 old_real_volume = s->real_volume;
2438 update_real_volume(s, new_real_volume, &s->channel_map);
2439 propagate_real_volume(s, &old_real_volume);
2442 /* Called from main thread */
/* Set the sink's mute state; 'save' marks the state for persistence.
 * NOTE(review): several lines are elided in this listing (the early return
 * for an unchanged state, the actual s->muted assignment, and the set_mute()
 * driver call); verify against the full source. */
2443 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2446 pa_sink_assert_ref(s);
2447 pa_assert_ctl_context();
2449 old_muted = s->muted;
/* If nothing changes, at most upgrade the save flag and bail out. */
2451 if (mute == old_muted) {
2452 s->save_muted |= save;
2457 s->save_muted = save;
/* Non-deferred mute: call into the driver right here, guarding against
 * re-entrancy via pa_sink_mute_changed(). */
2459 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
2460 s->set_mute_in_progress = true;
2462 s->set_mute_in_progress = false;
2465 if (!PA_SINK_IS_LINKED(s->state))
2468 pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
/* Sync the IO thread's soft-mute state, then notify subscribers and hooks. */
2469 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2470 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2471 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2474 /* Called from main thread */
/* Return the sink's mute state, optionally refreshing it from the driver
 * first. Deferred-volume sinks are queried via the IO thread; others call
 * get_mute() directly. NOTE(review): the final return statement is elided
 * in this listing. */
2475 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2477 pa_sink_assert_ref(s);
2478 pa_assert_ctl_context();
2479 pa_assert(PA_SINK_IS_LINKED(s->state));
2481 if ((s->refresh_muted || force_refresh) && s->get_mute) {
2484 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2485 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2486 pa_sink_mute_changed(s, mute);
2488 if (s->get_mute(s, &mute) >= 0)
2489 pa_sink_mute_changed(s, mute);
2496 /* Called from main thread */
/* Report an externally-observed mute change. Ignored while our own
 * set_mute() driver call is in flight (to avoid feedback loops); otherwise
 * forwarded to pa_sink_set_mute() with save=true. */
2497 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2498 pa_sink_assert_ref(s);
2499 pa_assert_ctl_context();
2500 pa_assert(PA_SINK_IS_LINKED(s->state));
2502 if (s->set_mute_in_progress)
2505 /* pa_sink_set_mute() does this same check, so this may appear redundant,
2506 * but we must have this here also, because the save parameter of
2507 * pa_sink_set_mute() would otherwise have unintended side effects (saving
2508 * the mute state when it shouldn't be saved). */
2509 if (new_muted == s->muted)
2512 pa_sink_set_mute(s, new_muted, true);
2515 /* Called from main thread */
/* Merge property list p into the sink's proplist using the given update
 * mode, firing the proplist-changed hook and a change event if linked. */
2516 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2517 pa_sink_assert_ref(s);
2518 pa_assert_ctl_context();
2521 pa_proplist_update(s->proplist, mode, p);
2523 if (PA_SINK_IS_LINKED(s->state)) {
2524 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2525 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2531 /* Called from main thread */
2532 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Set (or clear, when description==NULL) the human-readable device
 * description, keeping the monitor source's description in sync. */
2533 void pa_sink_set_description(pa_sink *s, const char *description) {
2535 pa_sink_assert_ref(s);
2536 pa_assert_ctl_context();
/* Nothing to clear if no description is set and none is requested. */
2538 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2541 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2543 if (old && description && pa_streq(old, description))
2547 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2549 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
/* Mirror the change onto the monitor source's description. */
2551 if (s->monitor_source) {
2554 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2555 pa_source_set_description(s->monitor_source, n);
2559 if (PA_SINK_IS_LINKED(s->state)) {
2560 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2561 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2565 /* Called from main thread */
/* Number of streams attached to this sink, including streams connected to
 * its monitor source (contrast with pa_sink_used_by()). */
2566 unsigned pa_sink_linked_by(pa_sink *s) {
2569 pa_sink_assert_ref(s);
2570 pa_assert_ctl_context();
2571 pa_assert(PA_SINK_IS_LINKED(s->state));
2573 ret = pa_idxset_size(s->inputs);
2575 /* We add in the number of streams connected to us here. Please
2576 * note the asymmetry to pa_sink_used_by()! */
2578 if (s->monitor_source)
2579 ret += pa_source_linked_by(s->monitor_source);
2584 /* Called from main thread */
/* Number of actively-playing (non-corked) streams on this sink. Monitor
 * streams are deliberately excluded, unlike in pa_sink_linked_by(). */
2585 unsigned pa_sink_used_by(pa_sink *s) {
2588 pa_sink_assert_ref(s);
2589 pa_assert_ctl_context();
2590 pa_assert(PA_SINK_IS_LINKED(s->state));
2592 ret = pa_idxset_size(s->inputs);
2593 pa_assert(ret >= s->n_corked);
2595 /* Streams connected to our monitor source do not matter for
2596 * pa_sink_used_by()!.*/
2598 return ret - s->n_corked;
2601 /* Called from main thread */
/* Count the inputs/outputs that should inhibit auto-suspend of this sink,
 * skipping ignore_input/ignore_output (e.g. a stream about to move away).
 * Returns 0 when nothing keeps the sink busy. */
2602 unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
2607 pa_sink_assert_ref(s);
2608 pa_assert_ctl_context();
2610 if (!PA_SINK_IS_LINKED(s->state))
2615 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2616 pa_sink_input_state_t st;
2618 if (i == ignore_input)
2621 st = pa_sink_input_get_state(i);
2623 /* We do not assert here. It is perfectly valid for a sink input to
2624 * be in the INIT state (i.e. created, marked done but not yet put)
2625 * and we should not care if it's unlinked as it won't contribute
2626 * towards our busy status.
2628 if (!PA_SINK_INPUT_IS_LINKED(st))
2631 if (st == PA_SINK_INPUT_CORKED)
2634 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
/* Also count busy streams on the monitor source. */
2640 if (s->monitor_source)
2641 ret += pa_source_check_suspend(s->monitor_source, ignore_output)
2646 const char *pa_sink_state_to_string(pa_sink_state_t state) {
2648 case PA_SINK_INIT: return "INIT";
2649 case PA_SINK_IDLE: return "IDLE";
2650 case PA_SINK_RUNNING: return "RUNNING";
2651 case PA_SINK_SUSPENDED: return "SUSPENDED";
2652 case PA_SINK_UNLINKED: return "UNLINKED";
2653 case PA_SINK_INVALID_STATE: return "INVALID_STATE";
2656 pa_assert_not_reached();
2659 /* Called from the IO thread */
/* Copy each attached input's main-thread soft volume into its thread_info
 * copy, requesting a rewind so the new volume takes effect retroactively
 * on already-rendered audio. Unchanged inputs are skipped. */
2660 static void sync_input_volumes_within_thread(pa_sink *s) {
2664 pa_sink_assert_ref(s);
2665 pa_sink_assert_io_context(s);
2667 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2668 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2671 i->thread_info.soft_volume = i->soft_volume;
2672 pa_sink_input_request_rewind(i, 0, true, false, false);
2676 /* Called from the IO thread. Only called for the root sink in volume sharing
2677 * cases, except for internal recursive calls. */
/* Apply a shared-volume update in the IO thread: process the synced-volume
 * message on this sink, then recurse into filter sinks that share volume
 * with their master. */
2678 static void set_shared_volume_within_thread(pa_sink *s) {
2679 pa_sink_input *i = NULL;
2682 pa_sink_assert_ref(s);
/* Direct (same-thread) dispatch of the SET_VOLUME_SYNCED handler. */
2684 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2686 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2687 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2688 set_shared_volume_within_thread(i->origin_sink);
2692 /* Called from IO thread, except when it is not */
/* Central message handler for the sink object. Most messages arrive in the
 * IO thread via the asyncmsgq; UPDATE_VOLUME_AND_MUTE is the exception
 * (posted by the IO thread, handled in the main thread). Returns 0 on
 * success. NOTE(review): this listing elides a number of lines within the
 * handler bodies (returns, braces, some statements); verify any control-flow
 * conclusion against the full source. */
2693 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2694 pa_sink *s = PA_SINK(o);
2695 pa_sink_assert_ref(s);
2697 switch ((pa_sink_message_t) code) {
2699 case PA_SINK_MESSAGE_ADD_INPUT: {
2700 pa_sink_input *i = PA_SINK_INPUT(userdata);
2702 /* If you change anything here, make sure to change the
2703 * sink input handling a few lines down at
2704 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2706 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2708 /* Since the caller sleeps in pa_sink_input_put(), we can
2709 * safely access data outside of thread_info even though
2712 if ((i->thread_info.sync_prev = i->sync_prev)) {
2713 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2714 pa_assert(i->sync_prev->sync_next == i);
2715 i->thread_info.sync_prev->thread_info.sync_next = i;
2718 if ((i->thread_info.sync_next = i->sync_next)) {
2719 pa_assert(i->sink == i->thread_info.sync_next->sink);
2720 pa_assert(i->sync_next->sync_prev == i);
2721 i->thread_info.sync_next->thread_info.sync_prev = i;
2724 pa_sink_input_attach(i);
2726 pa_sink_input_set_state_within_thread(i, i->state);
2728 /* The requested latency of the sink input needs to be fixed up and
2729 * then configured on the sink. If this causes the sink latency to
2730 * go down, the sink implementor is responsible for doing a rewind
2731 * in the update_requested_latency() callback to ensure that the
2732 * sink buffer doesn't contain more data than what the new latency
2735 * XXX: Does it really make sense to push this responsibility to
2736 * the sink implementors? Wouldn't it be better to do it once in
2737 * the core than many times in the modules? */
2739 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2740 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency)
2742 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2743 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2745 /* We don't rewind here automatically. This is left to the
2746 * sink input implementor because some sink inputs need a
2747 * slow start, i.e. need some time to buffer client
2748 * samples before beginning streaming.
2750 * XXX: Does it really make sense to push this functionality to
2751 * the sink implementors? Wouldn't it be better to do it once in
2752 * the core than many times in the modules? */
2754 /* In flat volume mode we need to update the volume as
2756 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2759 case PA_SINK_MESSAGE_REMOVE_INPUT: {
2760 pa_sink_input *i = PA_SINK_INPUT(userdata);
2762 /* If you change anything here, make sure to change the
2763 * sink input handling a few lines down at
2764 * PA_SINK_MESSAGE_START_MOVE, too. */
2766 pa_sink_input_detach(i);
2768 pa_sink_input_set_state_within_thread(i, i->state);
2770 /* Since the caller sleeps in pa_sink_input_unlink(),
2771 * we can safely access data outside of thread_info even
2772 * though it is mutable */
2774 pa_assert(!i->sync_prev);
2775 pa_assert(!i->sync_next);
/* Unhook this input from the IO-thread view of its sync chain. */
2777 if (i->thread_info.sync_prev) {
2778 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2779 i->thread_info.sync_prev = NULL;
2782 if (i->thread_info.sync_next) {
2783 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2784 i->thread_info.sync_next = NULL;
2787 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2788 pa_sink_invalidate_requested_latency(s, true);
2789 pa_sink_request_rewind(s, (size_t) -1);
2791 /* In flat volume mode we need to update the volume as
2793 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2796 case PA_SINK_MESSAGE_START_MOVE: {
2797 pa_sink_input *i = PA_SINK_INPUT(userdata);
2799 /* We don't support moving synchronized streams. */
2800 pa_assert(!i->sync_prev);
2801 pa_assert(!i->sync_next);
2802 pa_assert(!i->thread_info.sync_next);
2803 pa_assert(!i->thread_info.sync_prev);
2805 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2807 size_t sink_nbytes, total_nbytes;
2809 /* The old sink probably has some audio from this
2810 * stream in its buffer. We want to "take it back" as
2811 * much as possible and play it to the new sink. We
2812 * don't know at this point how much the old sink can
2813 * rewind. We have to pick something, and that
2814 * something is the full latency of the old sink here.
2815 * So we rewind the stream buffer by the sink latency
2816 * amount, which may be more than what we should
2817 * rewind. This can result in a chunk of audio being
2818 * played both to the old sink and the new sink.
2820 * FIXME: Fix this code so that we don't have to make
2821 * guesses about how much the sink will actually be
2822 * able to rewind. If someone comes up with a solution
2823 * for this, something to note is that the part of the
2824 * latency that the old sink couldn't rewind should
2825 * ideally be compensated after the stream has moved
2826 * to the new sink by adding silence. The new sink
2827 * most likely can't start playing the moved stream
2828 * immediately, and that gap should be removed from
2829 * the "compensation silence" (at least at the time of
2830 * writing this, the move finish code will actually
2831 * already take care of dropping the new sink's
2832 * unrewindable latency, so taking into account the
2833 * unrewindable latency of the old sink is the only
2836 * The render_memblockq contents are discarded,
2837 * because when the sink changes, the format of the
2838 * audio stored in the render_memblockq may change
2839 * too, making the stored audio invalid. FIXME:
2840 * However, the read and write indices are moved back
2841 * the same amount, so if they are not the same now,
2842 * they won't be the same after the rewind either. If
2843 * the write index of the render_memblockq is ahead of
2844 * the read index, then the render_memblockq will feed
2845 * the new sink some silence first, which it shouldn't
2846 * do. The write index should be flushed to be the
2847 * same as the read index. */
2849 /* Get the latency of the sink */
2850 usec = pa_sink_get_latency_within_thread(s, false);
2851 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2852 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
2854 if (total_nbytes > 0) {
2855 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
2856 i->thread_info.rewrite_flush = true;
2857 pa_sink_input_process_rewind(i, sink_nbytes);
2861 pa_sink_input_detach(i);
2863 /* Let's remove the sink input ...*/
2864 pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2866 pa_sink_invalidate_requested_latency(s, true);
2868 pa_log_debug("Requesting rewind due to started move");
2869 pa_sink_request_rewind(s, (size_t) -1);
2871 /* In flat volume mode we need to update the volume as
2873 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2876 case PA_SINK_MESSAGE_FINISH_MOVE: {
2877 pa_sink_input *i = PA_SINK_INPUT(userdata);
2879 /* We don't support moving synchronized streams. */
2880 pa_assert(!i->sync_prev);
2881 pa_assert(!i->sync_next);
2882 pa_assert(!i->thread_info.sync_next);
2883 pa_assert(!i->thread_info.sync_prev);
2885 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2887 pa_sink_input_attach(i);
2889 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2893 /* In the ideal case the new sink would start playing
2894 * the stream immediately. That requires the sink to
2895 * be able to rewind all of its latency, which usually
2896 * isn't possible, so there will probably be some gap
2897 * before the moved stream becomes audible. We then
2898 * have two possibilities: 1) start playing the stream
2899 * from where it is now, or 2) drop the unrewindable
2900 * latency of the sink from the stream. With option 1
2901 * we won't lose any audio but the stream will have a
2902 * pause. With option 2 we may lose some audio but the
2903 * stream time will be somewhat in sync with the wall
2904 * clock. Lennart seems to have chosen option 2 (one
2905 * of the reasons might have been that option 1 is
2906 * actually much harder to implement), so we drop the
2907 * latency of the new sink from the moved stream and
2908 * hope that the sink will undo most of that in the
2911 /* Get the latency of the sink */
2912 usec = pa_sink_get_latency_within_thread(s, false);
2913 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2916 pa_sink_input_drop(i, nbytes);
2918 pa_log_debug("Requesting rewind due to finished move");
2919 pa_sink_request_rewind(s, nbytes);
2922 /* Updating the requested sink latency has to be done
2923 * after the sink rewind request, not before, because
2924 * otherwise the sink may limit the rewind amount
2927 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2928 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2930 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2931 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2933 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2936 case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
/* Shared volume is always applied at the root of the filter chain. */
2937 pa_sink *root_sink = pa_sink_get_master(s);
2939 if (PA_LIKELY(root_sink))
2940 set_shared_volume_within_thread(root_sink);
2945 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
/* Deferred-volume sinks queue HW volume changes for later application. */
2947 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2949 pa_sink_volume_change_push(s);
2951 /* Fall through ... */
2953 case PA_SINK_MESSAGE_SET_VOLUME:
/* Adopt the new soft volume and rewind so it applies retroactively. */
2955 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2956 s->thread_info.soft_volume = s->soft_volume;
2957 pa_sink_request_rewind(s, (size_t) -1);
2960 /* Fall through ... */
2962 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2963 sync_input_volumes_within_thread(s);
2966 case PA_SINK_MESSAGE_GET_VOLUME:
2968 if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
2970 pa_sink_volume_change_flush(s);
2971 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2974 /* In case sink implementor reset SW volume. */
2975 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2976 s->thread_info.soft_volume = s->soft_volume;
2977 pa_sink_request_rewind(s, (size_t) -1);
2982 case PA_SINK_MESSAGE_SET_MUTE:
2984 if (s->thread_info.soft_muted != s->muted) {
2985 s->thread_info.soft_muted = s->muted;
2986 pa_sink_request_rewind(s, (size_t) -1);
2989 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2994 case PA_SINK_MESSAGE_GET_MUTE:
2996 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2997 return s->get_mute(s, userdata);
3001 case PA_SINK_MESSAGE_SET_STATE: {
/* Detect transitions into/out of SUSPENDED so inputs can be notified. */
3003 bool suspend_change =
3004 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
3005 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
3007 s->thread_info.state = PA_PTR_TO_UINT(userdata);
/* A suspended sink has no pending rewind. */
3009 if (s->thread_info.state == PA_SINK_SUSPENDED) {
3010 s->thread_info.rewind_nbytes = 0;
3011 s->thread_info.rewind_requested = false;
3014 if (suspend_change) {
3018 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
3019 if (i->suspend_within_thread)
3020 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
3026 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
3028 pa_usec_t *usec = userdata;
3029 *usec = pa_sink_get_requested_latency_within_thread(s);
3031 /* Yes, that's right, the IO thread will see -1 when no
3032 * explicit requested latency is configured, the main
3033 * thread will see max_latency */
3034 if (*usec == (pa_usec_t) -1)
3035 *usec = s->thread_info.max_latency;
3040 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
3041 pa_usec_t *r = userdata;
3043 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
3048 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
3049 pa_usec_t *r = userdata;
3051 r[0] = s->thread_info.min_latency;
3052 r[1] = s->thread_info.max_latency;
3057 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
3059 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
3062 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
3064 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
3067 case PA_SINK_MESSAGE_GET_MAX_REWIND:
3069 *((size_t*) userdata) = s->thread_info.max_rewind;
3072 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
3074 *((size_t*) userdata) = s->thread_info.max_request;
3077 case PA_SINK_MESSAGE_SET_MAX_REWIND:
3079 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
3082 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
3084 pa_sink_set_max_request_within_thread(s, (size_t) offset);
3087 case PA_SINK_MESSAGE_SET_PORT:
3089 pa_assert(userdata);
3091 struct sink_message_set_port *msg_data = userdata;
3092 msg_data->ret = s->set_port(s, msg_data->port);
3096 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
3097 /* This message is sent from IO-thread and handled in main thread. */
3098 pa_assert_ctl_context();
3100 /* Make sure we're not messing with main thread when no longer linked */
3101 if (!PA_SINK_IS_LINKED(s->state))
3104 pa_sink_get_volume(s, true);
3105 pa_sink_get_mute(s, true);
3108 case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
3109 s->thread_info.port_latency_offset = offset;
3112 case PA_SINK_MESSAGE_GET_LATENCY:
3113 case PA_SINK_MESSAGE_MAX:
3120 /* Called from main thread */
/* Suspend or resume every sink in the core for the given cause.
 * NOTE(review): the listing elides the accumulation/return of the result
 * code; verify error propagation against the full source. */
3121 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
3126 pa_core_assert_ref(c);
3127 pa_assert_ctl_context();
3128 pa_assert(cause != 0);
3130 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
3133 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
3140 /* Called from IO thread */
/* Detach all inputs (and the monitor source) from the IO loop, e.g. before
 * the IO thread shuts down or re-configures. */
3141 void pa_sink_detach_within_thread(pa_sink *s) {
3145 pa_sink_assert_ref(s);
3146 pa_sink_assert_io_context(s);
3147 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3149 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3150 pa_sink_input_detach(i);
3152 if (s->monitor_source)
3153 pa_source_detach_within_thread(s->monitor_source);
3156 /* Called from IO thread */
/* Counterpart of pa_sink_detach_within_thread(): re-attach all inputs and
 * the monitor source to the IO loop. */
3157 void pa_sink_attach_within_thread(pa_sink *s) {
3161 pa_sink_assert_ref(s);
3162 pa_sink_assert_io_context(s);
3163 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3165 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3166 pa_sink_input_attach(i);
3168 if (s->monitor_source)
3169 pa_source_attach_within_thread(s->monitor_source);
3172 /* Called from IO thread */
/* Request that the sink rewind its playback buffer by up to nbytes
 * ((size_t) -1 means "as much as possible"). The request is clamped to
 * max_rewind and coalesced with any larger pending request. */
3173 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
3174 pa_sink_assert_ref(s);
3175 pa_sink_assert_io_context(s);
3176 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3178 if (nbytes == (size_t) -1)
3179 nbytes = s->thread_info.max_rewind;
3181 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
/* A pending request that already covers nbytes makes this one a no-op. */
3183 if (s->thread_info.rewind_requested &&
3184 nbytes <= s->thread_info.rewind_nbytes)
3187 s->thread_info.rewind_nbytes = nbytes;
3188 s->thread_info.rewind_requested = true;
/* Let the implementation react immediately, if it provides a hook. */
3190 if (s->request_rewind)
3191 s->request_rewind(s);
3194 /* Called from IO thread */
/* Compute the effective requested latency: the minimum of all inputs'
 * requested sink latencies and the monitor source's request, clamped to
 * [min_latency, max_latency]. Returns (pa_usec_t) -1 if nothing requested
 * anything. The result is cached once the sink is linked. */
3195 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
3196 pa_usec_t result = (pa_usec_t) -1;
3199 pa_usec_t monitor_latency;
3201 pa_sink_assert_ref(s);
3202 pa_sink_assert_io_context(s);
/* Without dynamic latency the fixed latency (clamped) always wins. */
3204 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3205 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
3207 if (s->thread_info.requested_latency_valid)
3208 return s->thread_info.requested_latency;
3210 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3211 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3212 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3213 result = i->thread_info.requested_sink_latency;
3215 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3217 if (monitor_latency != (pa_usec_t) -1 &&
3218 (result == (pa_usec_t) -1 || result > monitor_latency))
3219 result = monitor_latency;
3221 if (result != (pa_usec_t) -1)
3222 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3224 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3225 /* Only cache if properly initialized */
3226 s->thread_info.requested_latency = result;
3227 s->thread_info.requested_latency_valid = true;
3233 /* Called from main thread */
/* Main-thread accessor for the requested latency; asks the IO thread via a
 * synchronous message. NOTE(review): the suspended-sink branch and the
 * return statement are elided in this listing. */
3234 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3237 pa_sink_assert_ref(s);
3238 pa_assert_ctl_context();
3239 pa_assert(PA_SINK_IS_LINKED(s->state));
3241 if (s->state == PA_SINK_SUSPENDED)
3244 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3249 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Update the sink's maximum rewind amount and fan the change out to all
 * attached inputs and the monitor source. No-op if unchanged. */
3250 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3254 pa_sink_assert_ref(s);
3255 pa_sink_assert_io_context(s);
3257 if (max_rewind == s->thread_info.max_rewind)
3260 s->thread_info.max_rewind = max_rewind;
3262 if (PA_SINK_IS_LINKED(s->thread_info.state))
3263 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3264 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3266 if (s->monitor_source)
3267 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3270 /* Called from main thread */
/* Main-thread wrapper: route the max-rewind update through the IO thread
 * when linked, otherwise apply it directly (IO thread not yet running). */
3271 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3272 pa_sink_assert_ref(s);
3273 pa_assert_ctl_context();
3275 if (PA_SINK_IS_LINKED(s->state))
3276 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3278 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3281 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Update the sink's maximum request size and propagate it to all attached
 * inputs. No-op if unchanged. */
3282 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3285 pa_sink_assert_ref(s);
3286 pa_sink_assert_io_context(s);
3288 if (max_request == s->thread_info.max_request)
3291 s->thread_info.max_request = max_request;
3293 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3296 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3297 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3301 /* Called from main thread */
/* Main-thread wrapper: route the max-request update through the IO thread
 * when linked, otherwise apply it directly (IO thread not yet running). */
3302 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3303 pa_sink_assert_ref(s);
3304 pa_assert_ctl_context();
3306 if (PA_SINK_IS_LINKED(s->state))
3307 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3309 pa_sink_set_max_request_within_thread(s, max_request);
3312 /* Called from IO thread */
/* Invalidate the cached requested latency (only meaningful for dynamic-
 * latency sinks) and notify the sink implementation plus all inputs so
 * they can recompute. */
3313 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
3317 pa_sink_assert_ref(s);
3318 pa_sink_assert_io_context(s);
3320 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3321 s->thread_info.requested_latency_valid = false;
/* NOTE(review): a branch involving the 'dynamic' parameter is elided in
 * this listing between the cache invalidation and the notifications. */
3325 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3327 if (s->update_requested_latency)
3328 s->update_requested_latency(s);
3330 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3331 if (i->update_sink_requested_latency)
3332 i->update_sink_requested_latency(i);
3336 /* Called from main thread */
/* Set the sink's allowed latency range. Zero arguments mean "no limit" and
 * are normalized to the absolute bounds. Routed through the IO thread when
 * linked, applied directly otherwise. */
3337 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3338 pa_sink_assert_ref(s);
3339 pa_assert_ctl_context();
3341 /* min_latency == 0: no limit
3342 * min_latency anything else: specified limit
3344 * Similar for max_latency */
3346 if (min_latency < ABSOLUTE_MIN_LATENCY)
3347 min_latency = ABSOLUTE_MIN_LATENCY;
/* pa_usec_t is unsigned, so "<= 0" is effectively "== 0" (the no-limit
 * sentinel). */
3349 if (max_latency <= 0 ||
3350 max_latency > ABSOLUTE_MAX_LATENCY)
3351 max_latency = ABSOLUTE_MAX_LATENCY;
3353 pa_assert(min_latency <= max_latency);
3355 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3356 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3357 max_latency == ABSOLUTE_MAX_LATENCY) ||
3358 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3360 if (PA_SINK_IS_LINKED(s->state)) {
/* NOTE(review): the declaration/initialization of the r[2] message
 * payload is elided in this listing. */
3366 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3368 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3371 /* Called from main thread */
3372 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3373 pa_sink_assert_ref(s);
3374 pa_assert_ctl_context();
3375 pa_assert(min_latency);
3376 pa_assert(max_latency);
3378 if (PA_SINK_IS_LINKED(s->state)) {
3379 pa_usec_t r[2] = { 0, 0 };
3381 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3383 *min_latency = r[0];
3384 *max_latency = r[1];
3386 *min_latency = s->thread_info.min_latency;
3387 *max_latency = s->thread_info.max_latency;
3391 /* Called from IO thread */
3392 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3393 pa_sink_assert_ref(s);
3394 pa_sink_assert_io_context(s);
3396 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3397 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3398 pa_assert(min_latency <= max_latency);
3400 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3401 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3402 max_latency == ABSOLUTE_MAX_LATENCY) ||
3403 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3405 if (s->thread_info.min_latency == min_latency &&
3406 s->thread_info.max_latency == max_latency)
3409 s->thread_info.min_latency = min_latency;
3410 s->thread_info.max_latency = max_latency;
3412 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3416 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3417 if (i->update_sink_latency_range)
3418 i->update_sink_latency_range(i);
3421 pa_sink_invalidate_requested_latency(s, false);
3423 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3426 /* Called from main thread */
3427 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3428 pa_sink_assert_ref(s);
3429 pa_assert_ctl_context();
3431 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3432 pa_assert(latency == 0);
3436 if (latency < ABSOLUTE_MIN_LATENCY)
3437 latency = ABSOLUTE_MIN_LATENCY;
3439 if (latency > ABSOLUTE_MAX_LATENCY)
3440 latency = ABSOLUTE_MAX_LATENCY;
3442 if (PA_SINK_IS_LINKED(s->state))
3443 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3445 s->thread_info.fixed_latency = latency;
3447 pa_source_set_fixed_latency(s->monitor_source, latency);
3450 /* Called from main thread */
3451 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3454 pa_sink_assert_ref(s);
3455 pa_assert_ctl_context();
3457 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3460 if (PA_SINK_IS_LINKED(s->state))
3461 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3463 latency = s->thread_info.fixed_latency;
3468 /* Called from IO thread */
3469 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3470 pa_sink_assert_ref(s);
3471 pa_sink_assert_io_context(s);
3473 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3474 pa_assert(latency == 0);
3475 s->thread_info.fixed_latency = 0;
3477 if (s->monitor_source)
3478 pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
3483 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3484 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3486 if (s->thread_info.fixed_latency == latency)
3489 s->thread_info.fixed_latency = latency;
3491 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3495 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3496 if (i->update_sink_fixed_latency)
3497 i->update_sink_fixed_latency(i);
3500 pa_sink_invalidate_requested_latency(s, false);
3502 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3505 /* Called from main context */
3506 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3507 pa_sink_assert_ref(s);
3509 s->port_latency_offset = offset;
3511 if (PA_SINK_IS_LINKED(s->state))
3512 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3514 s->thread_info.port_latency_offset = offset;
3516 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3519 /* Called from main context */
3520 size_t pa_sink_get_max_rewind(pa_sink *s) {
3522 pa_assert_ctl_context();
3523 pa_sink_assert_ref(s);
3525 if (!PA_SINK_IS_LINKED(s->state))
3526 return s->thread_info.max_rewind;
3528 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3533 /* Called from main context */
3534 size_t pa_sink_get_max_request(pa_sink *s) {
3536 pa_sink_assert_ref(s);
3537 pa_assert_ctl_context();
3539 if (!PA_SINK_IS_LINKED(s->state))
3540 return s->thread_info.max_request;
3542 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3547 /* Called from main context */
3548 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3549 pa_device_port *port;
3552 pa_sink_assert_ref(s);
3553 pa_assert_ctl_context();
3556 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3557 return -PA_ERR_NOTIMPLEMENTED;
3561 return -PA_ERR_NOENTITY;
3563 if (!(port = pa_hashmap_get(s->ports, name)))
3564 return -PA_ERR_NOENTITY;
3566 if (s->active_port == port) {
3567 s->save_port = s->save_port || save;
3571 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
3572 struct sink_message_set_port msg = { .port = port, .ret = 0 };
3573 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
3577 ret = s->set_port(s, port);
3580 return -PA_ERR_NOENTITY;
3582 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3584 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3586 s->active_port = port;
3587 s->save_port = save;
3589 pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3591 /* The active port affects the default sink selection. */
3592 pa_core_update_default_sink(s->core);
3594 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
3599 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3600 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3604 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3607 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3609 if (pa_streq(ff, "microphone"))
3610 t = "audio-input-microphone";
3611 else if (pa_streq(ff, "webcam"))
3613 else if (pa_streq(ff, "computer"))
3615 else if (pa_streq(ff, "handset"))
3617 else if (pa_streq(ff, "portable"))
3618 t = "multimedia-player";
3619 else if (pa_streq(ff, "tv"))
3620 t = "video-display";
3623 * The following icons are not part of the icon naming spec,
3624 * because Rodney Dawes sucks as the maintainer of that spec.
3626 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3628 else if (pa_streq(ff, "headset"))
3629 t = "audio-headset";
3630 else if (pa_streq(ff, "headphone"))
3631 t = "audio-headphones";
3632 else if (pa_streq(ff, "speaker"))
3633 t = "audio-speakers";
3634 else if (pa_streq(ff, "hands-free"))
3635 t = "audio-handsfree";
3639 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3640 if (pa_streq(c, "modem"))
3647 t = "audio-input-microphone";
3650 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3651 if (strstr(profile, "analog"))
3653 else if (strstr(profile, "iec958"))
3655 else if (strstr(profile, "hdmi"))
3659 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3661 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3666 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3667 const char *s, *d = NULL, *k;
3670 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3674 if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3678 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3679 if (pa_streq(s, "internal"))
3680 d = _("Built-in Audio");
3683 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3684 if (pa_streq(s, "modem"))
3688 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3693 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3696 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3698 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3703 bool pa_device_init_intended_roles(pa_proplist *p) {
3707 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3710 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3711 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3712 || pa_streq(s, "headset")) {
3713 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3720 unsigned pa_device_init_priority(pa_proplist *p) {
3722 unsigned priority = 0;
3726 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3728 if (pa_streq(s, "sound"))
3730 else if (!pa_streq(s, "modem"))
3734 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3736 if (pa_streq(s, "headphone"))
3738 else if (pa_streq(s, "hifi"))
3740 else if (pa_streq(s, "speaker"))
3742 else if (pa_streq(s, "portable"))
3746 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3748 if (pa_streq(s, "bluetooth"))
3750 else if (pa_streq(s, "usb"))
3752 else if (pa_streq(s, "pci"))
3756 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3758 if (pa_startswith(s, "analog-"))
3760 else if (pa_startswith(s, "iec958-"))
3767 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3769 /* Called from the IO thread. */
3770 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3771 pa_sink_volume_change *c;
3772 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3773 c = pa_xnew(pa_sink_volume_change, 1);
3775 PA_LLIST_INIT(pa_sink_volume_change, c);
3777 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3781 /* Called from the IO thread. */
3782 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3784 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3788 /* Called from the IO thread. */
3789 void pa_sink_volume_change_push(pa_sink *s) {
3790 pa_sink_volume_change *c = NULL;
3791 pa_sink_volume_change *nc = NULL;
3792 pa_sink_volume_change *pc = NULL;
3793 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3795 const char *direction = NULL;
3798 nc = pa_sink_volume_change_new(s);
3800 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3801 * Adding one more volume for HW would get us rid of this, but I am trying
3802 * to survive with the ones we already have. */
3803 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
3805 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3806 pa_log_debug("Volume not changing");
3807 pa_sink_volume_change_free(nc);
3811 nc->at = pa_sink_get_latency_within_thread(s, false);
3812 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3814 if (s->thread_info.volume_changes_tail) {
3815 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3816 /* If volume is going up let's do it a bit late. If it is going
3817 * down let's do it a bit early. */
3818 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3819 if (nc->at + safety_margin > c->at) {
3820 nc->at += safety_margin;
3825 else if (nc->at - safety_margin > c->at) {
3826 nc->at -= safety_margin;
3834 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3835 nc->at += safety_margin;
3838 nc->at -= safety_margin;
3841 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3844 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3847 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3849 /* We can ignore volume events that came earlier but should happen later than this. */
3850 PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
3851 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3852 pa_sink_volume_change_free(c);
3855 s->thread_info.volume_changes_tail = nc;
3858 /* Called from the IO thread. */
3859 static void pa_sink_volume_change_flush(pa_sink *s) {
3860 pa_sink_volume_change *c = s->thread_info.volume_changes;
3862 s->thread_info.volume_changes = NULL;
3863 s->thread_info.volume_changes_tail = NULL;
3865 pa_sink_volume_change *next = c->next;
3866 pa_sink_volume_change_free(c);
3871 /* Called from the IO thread. */
3872 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3878 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3884 pa_assert(s->write_volume);
3886 now = pa_rtclock_now();
3888 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3889 pa_sink_volume_change *c = s->thread_info.volume_changes;
3890 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3891 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3892 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3894 s->thread_info.current_hw_volume = c->hw_volume;
3895 pa_sink_volume_change_free(c);
3901 if (s->thread_info.volume_changes) {
3903 *usec_to_next = s->thread_info.volume_changes->at - now;
3904 if (pa_log_ratelimit(PA_LOG_DEBUG))
3905 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3910 s->thread_info.volume_changes_tail = NULL;
3915 /* Called from the IO thread. */
3916 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3917 /* All the queued volume events later than current latency are shifted to happen earlier. */
3918 pa_sink_volume_change *c;
3919 pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3920 pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3921 pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
3923 pa_log_debug("latency = %lld", (long long) limit);
3924 limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3926 PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3927 pa_usec_t modified_limit = limit;
3928 if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3929 modified_limit -= s->thread_info.volume_change_safety_margin;
3931 modified_limit += s->thread_info.volume_change_safety_margin;
3932 if (c->at > modified_limit) {
3934 if (c->at < modified_limit)
3935 c->at = modified_limit;
3937 prev_vol = pa_cvolume_avg(&c->hw_volume);
3939 pa_sink_volume_change_apply(s, NULL);
3942 /* Called from the main thread */
3943 /* Gets the list of formats supported by the sink. The members and idxset must
3944 * be freed by the caller. */
3945 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3950 if (s->get_formats) {
3951 /* Sink supports format query, all is good */
3952 ret = s->get_formats(s);
3954 /* Sink doesn't support format query, so assume it does PCM */
3955 pa_format_info *f = pa_format_info_new();
3956 f->encoding = PA_ENCODING_PCM;
3958 ret = pa_idxset_new(NULL, NULL);
3959 pa_idxset_put(ret, f, NULL);
3965 /* Called from the main thread */
3966 /* Allows an external source to set what formats a sink supports if the sink
3967 * permits this. The function makes a copy of the formats on success. */
3968 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3973 /* Sink supports setting formats -- let's give it a shot */
3974 return s->set_formats(s, formats);
3976 /* Sink doesn't support setting this -- bail out */
3980 /* Called from the main thread */
3981 /* Checks if the sink can accept this format */
3982 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3983 pa_idxset *formats = NULL;
3989 formats = pa_sink_get_formats(s);
3992 pa_format_info *finfo_device;
3995 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3996 if (pa_format_info_is_compatible(finfo_device, f)) {
4002 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
4008 /* Called from the main thread */
4009 /* Calculates the intersection between formats supported by the sink and
4010 * in_formats, and returns these, in the order of the sink's formats. */
4011 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
4012 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
4013 pa_format_info *f_sink, *f_in;
4018 if (!in_formats || pa_idxset_isempty(in_formats))
4021 sink_formats = pa_sink_get_formats(s);
4023 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
4024 PA_IDXSET_FOREACH(f_in, in_formats, j) {
4025 if (pa_format_info_is_compatible(f_sink, f_in))
4026 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
4032 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
4037 /* Called from the main thread. */
4038 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
4039 pa_cvolume old_volume;
4040 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4041 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4046 old_volume = s->reference_volume;
4048 if (pa_cvolume_equal(volume, &old_volume))
4051 s->reference_volume = *volume;
4052 pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
4053 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
4054 s->flags & PA_SINK_DECIBEL_VOLUME),
4055 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
4056 s->flags & PA_SINK_DECIBEL_VOLUME));
4058 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4059 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);