2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
34 #include <pulse/introspect.h>
35 #include <pulse/format.h>
36 #include <pulse/utf8.h>
37 #include <pulse/xmalloc.h>
38 #include <pulse/timeval.h>
39 #include <pulse/util.h>
40 #include <pulse/rtclock.h>
41 #include <pulse/internal.h>
43 #include <pulsecore/i18n.h>
44 #include <pulsecore/sink-input.h>
45 #include <pulsecore/namereg.h>
46 #include <pulsecore/core-util.h>
47 #include <pulsecore/sample-util.h>
48 #include <pulsecore/mix.h>
49 #include <pulsecore/core-subscribe.h>
50 #include <pulsecore/log.h>
51 #include <pulsecore/macro.h>
52 #include <pulsecore/play-memblockq.h>
53 #include <pulsecore/flist.h>
57 #define MAX_MIX_CHANNELS 32
58 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
59 #define ABSOLUTE_MIN_LATENCY (500)
60 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
61 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
63 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
65 struct pa_sink_volume_change {
69 PA_LLIST_FIELDS(pa_sink_volume_change);
72 struct sink_message_set_port {
78 //#define PA_DUMP_SINK_FOR_EACH_SUSPEND
79 #define PA_DUMP_SINK_PATH_PREFIX "/tmp/dump_pa_sink"
82 static void sink_free(pa_object *s);
84 static void pa_sink_volume_change_push(pa_sink *s);
85 static void pa_sink_volume_change_flush(pa_sink *s);
86 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/*
 * Tizen debug helper: lazily opens a PCM dump file when the runtime flag
 * s->core->dump_sink is set, closes it when the flag is cleared, and appends
 * the rendered chunk `target` to it.  Not part of upstream PulseAudio.
 *
 * NOTE(review): this view is an elided extraction -- several original lines
 * (closing braces, the `datetime`/`t`/`ptr` declarations, #else branches)
 * are not visible; comments below describe only what the visible code shows.
 * NOTE(review): the leading "__" makes this a reserved identifier in C --
 * consider renaming; also "surfix" is presumably a typo for "suffix".
 */
89 static void __toggle_open_close_n_write_dump(pa_sink *s, pa_memchunk *target)
91 /* open file for dump pcm */
92 if (s->core->dump_sink && !s->dump_fp) {
93 char *dump_path = NULL, *dump_path_surfix = NULL;
94 const char *s_device_api_str;
/* Optionally timestamp the file name so each suspend cycle gets its own dump. */
95 #ifdef PA_DUMP_SINK_FOR_EACH_SUSPEND
100 memset(&datetime[0], 0x00, sizeof(datetime));
101 strftime(&datetime[0], sizeof(datetime), "%m%d_%H%M%S", localtime(&t));
/* Derive a file-name suffix from the device API: ALSA card/device indices,
 * "bluez" for Bluetooth, or the sink index as a fallback below. */
104 if ((s_device_api_str = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_API))) {
105 if (pa_streq(s_device_api_str, "alsa")) {
106 const char *card_idx_str, *device_idx_str;
107 uint32_t card_idx = 0, device_idx = 0;
109 if ((card_idx_str = pa_proplist_gets(s->proplist, "alsa.card")))
110 pa_atou(card_idx_str, &card_idx);
111 if ((device_idx_str = pa_proplist_gets(s->proplist, "alsa.device")))
112 pa_atou(device_idx_str, &device_idx);
/* NOTE(review): card_idx/device_idx are uint32_t but formatted with %d --
 * "%u" (or PRIu32) would be the type-correct specifier. */
113 dump_path_surfix = pa_sprintf_malloc("alsa_%d_%d.pcm", card_idx, device_idx);
114 } else if (pa_streq(s_device_api_str, "bluez")) {
115 dump_path_surfix = pa_sprintf_malloc("bluez.pcm");
/* Fallback suffix when the device API is unknown: use the sink index. */
118 if (!dump_path_surfix) {
119 dump_path_surfix = pa_sprintf_malloc("idx_%d.pcm", s->index);
122 #ifdef PA_DUMP_SINK_FOR_EACH_SUSPEND
123 dump_path = pa_sprintf_malloc("%s_%s_%s", PA_DUMP_SINK_PATH_PREFIX, &datetime[0], dump_path_surfix);
125 dump_path = pa_sprintf_malloc("%s_%s", PA_DUMP_SINK_PATH_PREFIX, dump_path_surfix);
/* NOTE(review): fopen() result is not checked here; a failed open leaves
 * dump_fp NULL and the open is silently retried on the next call. */
129 s->dump_fp = fopen(dump_path, "w");
130 pa_log_info("pa_sink dump started:%s", dump_path);
/* NOTE(review): `if (ptr) pa_xfree(ptr)` is redundant -- pa_xfree(NULL) is a no-op. */
133 if (dump_path_surfix)
134 pa_xfree(dump_path_surfix);
135 /* close file for dump pcm when config is changed */
136 } else if (!s->core->dump_sink && s->dump_fp) {
/* Append the rendered PCM chunk to the dump file while it is open. */
145 ptr = pa_memblock_acquire(target->memblock);
147 fwrite((uint8_t*) ptr + target->index, 1, target->length, s->dump_fp);
149 pa_memblock_release(target->memblock);
/* Initialize a pa_sink_new_data structure: fresh proplist and an empty
 * port hashmap keyed by port name (values unreffed on removal).
 * Returns `data` for call chaining.  (Some lines elided in this view.) */
154 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
158 data->proplist = pa_proplist_new();
159 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
/* Replace the sink name in `data`, freeing any previously set name. */
164 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
167 pa_xfree(data->name);
168 data->name = pa_xstrdup(name);
/* Store the sample spec; passing NULL clears the "is set" flag instead. */
171 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
174 if ((data->sample_spec_is_set = !!spec))
175 data->sample_spec = *spec;
/* Store the channel map; passing NULL clears the "is set" flag instead. */
178 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
181 if ((data->channel_map_is_set = !!map))
182 data->channel_map = *map;
/* Record the alternate sample rate and mark it explicitly set. */
185 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
188 data->alternate_sample_rate_is_set = true;
189 data->alternate_sample_rate = alternate_sample_rate;
/* Store the initial volume; passing NULL clears the "is set" flag instead. */
192 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
195 if ((data->volume_is_set = !!volume))
196 data->volume = *volume;
/* Record the initial mute state and mark it explicitly set.
 * (The !! is redundant since `mute` is already bool, but harmless.) */
199 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
202 data->muted_is_set = true;
203 data->muted = !!mute;
/* Replace the requested active port name, freeing any previous value. */
206 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
209 pa_xfree(data->active_port);
210 data->active_port = pa_xstrdup(port);
/* Release everything owned by a pa_sink_new_data: proplist, port map
 * (ports themselves are unreffed by the hashmap free callback), name and
 * active-port strings.  (Guard lines are elided in this view.) */
213 void pa_sink_new_data_done(pa_sink_new_data *data) {
216 pa_proplist_free(data->proplist);
219 pa_hashmap_free(data->ports);
221 pa_xfree(data->name);
222 pa_xfree(data->active_port);
225 /* Called from main context */
/* Clear all implementor-supplied callbacks on the sink so a freshly created
 * (or re-initialized) sink has no stale function pointers.  (Some callback
 * fields, e.g. set_state/set_mute, are elided in this view.) */
226 static void reset_callbacks(pa_sink *s) {
230 s->get_volume = NULL;
231 s->set_volume = NULL;
232 s->write_volume = NULL;
235 s->request_rewind = NULL;
236 s->update_requested_latency = NULL;
238 s->get_formats = NULL;
239 s->set_formats = NULL;
240 s->update_rate = NULL;
243 /* Called from main context */
/*
 * Create and partially initialize a new pa_sink from `data`.  The caller
 * must still call pa_sink_put() to make the sink fully operational.
 * On success the sink's name is registered, SINK_NEW/SINK_FIXATE hooks have
 * fired, defaults (volume, mute, ports, latency bounds) are filled in, and a
 * companion monitor source ("<name>.monitor") is created.
 * Returns NULL on validation or hook failure.
 * NOTE(review): many lines (error-return paths, closing braces, several
 * field initializations) are elided in this extraction.
 */
244 pa_sink* pa_sink_new(
246 pa_sink_new_data *data,
247 pa_sink_flags_t flags) {
251 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
252 pa_source_new_data source_data;
258 pa_assert(data->name);
259 pa_assert_ctl_context();
261 s = pa_msgobject_new(pa_sink);
/* Register the sink name; failure aborts creation. */
263 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
264 pa_log_debug("Failed to register name %s.", data->name);
/* The registry may have mangled the name -- store the canonical one. */
269 pa_sink_new_data_set_name(data, name);
271 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
273 pa_namereg_unregister(core, name);
277 /* FIXME, need to free s here on failure */
/* Validate everything a module may have put into `data`. */
279 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
280 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
282 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
284 if (!data->channel_map_is_set)
285 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
287 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
288 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
290 /* FIXME: There should probably be a general function for checking whether
291 * the sink volume is allowed to be set, like there is for sink inputs. */
292 pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
294 if (!data->volume_is_set) {
295 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
296 data->save_volume = false;
299 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
300 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
302 if (!data->muted_is_set)
/* Inherit card properties, then fill in description/icon/role defaults. */
306 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
308 pa_device_init_description(data->proplist);
309 pa_device_init_icon(data->proplist, true);
310 pa_device_init_intended_roles(data->proplist);
/* No port requested: pick the best available one as default. */
312 if (!data->active_port) {
313 pa_device_port *p = pa_device_port_find_best(data->ports);
315 pa_sink_new_data_set_port(data, p->name);
318 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
320 pa_namereg_unregister(core, name);
/* --- From here on, copy the fixated data into the sink object. --- */
324 s->parent.parent.free = sink_free;
325 s->parent.process_msg = pa_sink_process_msg;
328 s->state = PA_SINK_INIT;
331 s->suspend_cause = data->suspend_cause;
332 pa_sink_set_mixer_dirty(s, false);
333 s->name = pa_xstrdup(name);
334 s->proplist = pa_proplist_copy(data->proplist);
335 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
336 s->module = data->module;
337 s->card = data->card;
339 s->priority = pa_device_init_priority(s->proplist);
341 s->sample_spec = data->sample_spec;
342 s->channel_map = data->channel_map;
343 s->default_sample_rate = s->sample_spec.rate;
345 if (data->alternate_sample_rate_is_set)
346 s->alternate_sample_rate = data->alternate_sample_rate;
348 s->alternate_sample_rate = s->core->alternate_sample_rate;
/* An alternate rate equal to the default is useless -- disable it. */
350 if (s->sample_spec.rate == s->alternate_sample_rate) {
351 pa_log_warn("Default and alternate sample rates are the same.");
352 s->alternate_sample_rate = 0;
355 s->inputs = pa_idxset_new(NULL, NULL);
357 s->input_to_master = NULL;
359 s->reference_volume = s->real_volume = data->volume;
360 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
361 s->base_volume = PA_VOLUME_NORM;
362 s->n_volume_steps = PA_VOLUME_NORM+1;
363 s->muted = data->muted;
364 s->refresh_volume = s->refresh_muted = false;
371 /* As a minor optimization we just steal the list instead of
373 s->ports = data->ports;
376 s->active_port = NULL;
377 s->save_port = false;
379 if (data->active_port)
380 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
381 s->save_port = data->save_port;
383 /* Hopefully the active port has already been assigned in the previous call
384 to pa_device_port_find_best, but better safe than sorry */
386 s->active_port = pa_device_port_find_best(s->ports);
389 s->latency_offset = s->active_port->latency_offset;
391 s->latency_offset = 0;
393 s->save_volume = data->save_volume;
394 s->save_muted = data->save_muted;
399 pa_silence_memchunk_get(
400 &core->silence_cache,
406 pa_cvolume_ramp_int_init(&s->ramp, PA_VOLUME_NORM, data->sample_spec.channels);
/* --- IO-thread-side mirror state (thread_info) initialization. --- */
408 s->thread_info.rtpoll = NULL;
409 s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
410 (pa_free_cb_t) pa_sink_input_unref);
411 s->thread_info.soft_volume = s->soft_volume;
412 s->thread_info.soft_muted = s->muted;
413 s->thread_info.state = s->state;
414 s->thread_info.rewind_nbytes = 0;
415 s->thread_info.rewind_requested = false;
416 s->thread_info.max_rewind = 0;
417 s->thread_info.max_request = 0;
418 s->thread_info.requested_latency_valid = false;
419 s->thread_info.requested_latency = 0;
420 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
421 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
422 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
424 PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
425 s->thread_info.volume_changes_tail = NULL;
426 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
427 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
428 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
429 s->thread_info.latency_offset = s->latency_offset;
431 s->thread_info.ramp = s->ramp;
433 /* FIXME: This should probably be moved to pa_sink_put() */
434 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
437 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
439 pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
440 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
443 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
444 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* --- Create the companion monitor source for this sink. --- */
448 pa_source_new_data_init(&source_data);
449 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
450 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
451 pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
452 source_data.name = pa_sprintf_malloc("%s.monitor", name);
453 source_data.driver = data->driver;
454 source_data.module = data->module;
455 source_data.card = data->card;
457 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
458 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
459 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
/* The monitor source inherits only the latency-related flags. */
461 s->monitor_source = pa_source_new(core, &source_data,
462 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
463 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
465 pa_source_new_data_done(&source_data);
467 if (!s->monitor_source) {
473 s->monitor_source->monitor_of = s;
475 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
476 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
477 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
482 /* Called from main context */
/*
 * Transition the sink to `state`.  Calls the implementor's set_state()
 * callback, then synchronously informs the IO thread via SET_STATE; on IO
 * failure the callback is rolled back to the original state.  Afterwards
 * fires STATE_CHANGED hooks/subscriptions (unless unlinking) and, on a
 * suspend/resume transition, notifies inputs and the monitor source.
 * Returns 0 on success, negative on failure.  (Several lines elided.)
 */
483 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
486 pa_sink_state_t original_state;
489 pa_assert_ctl_context();
491 if (s->state == state)
494 original_state = s->state;
/* suspend_change: true iff we cross the SUSPENDED boundary in either direction. */
497 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
498 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
501 if ((ret = s->set_state(s, state)) < 0)
505 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
/* IO thread rejected the change -- undo the implementor-side transition. */
508 s->set_state(s, original_state);
515 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
516 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
517 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
520 if (suspend_change) {
524 /* We're suspending or resuming, tell everyone about it */
526 PA_IDXSET_FOREACH(i, s->inputs, idx)
527 if (s->state == PA_SINK_SUSPENDED &&
528 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
529 pa_sink_input_kill(i);
531 i->suspend(i, state == PA_SINK_SUSPENDED);
533 if (s->monitor_source)
534 pa_source_sync_suspend(s->monitor_source);
/* Install the implementor's get_volume callback.  (Body elided in this view.) */
540 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
/*
 * Install (or clear, cb==NULL) the implementor's set_volume callback and
 * update PA_SINK_HW_VOLUME_CTRL accordingly; decibel-volume support is
 * re-derived as a side effect.  Posts a change event if the flags changed
 * after init.  (Some lines elided in this view.)
 */
546 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
547 pa_sink_flags_t flags;
/* A write_volume callback only makes sense together with set_volume. */
550 pa_assert(!s->write_volume || cb);
554 /* Save the current flags so we can tell if they've changed */
558 /* The sink implementor is responsible for setting decibel volume support */
559 s->flags |= PA_SINK_HW_VOLUME_CTRL;
561 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
562 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
563 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
566 /* If the flags have changed after init, let any clients know via a change event */
567 if (s->state != PA_SINK_INIT && flags != s->flags)
568 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/*
 * Install (or clear) the implementor's write_volume callback and toggle
 * PA_SINK_DEFERRED_VOLUME to match.  Posts a change event if the flags
 * changed after init.  (Some lines elided in this view.)
 */
571 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
572 pa_sink_flags_t flags;
/* write_volume requires a set_volume callback to already be in place. */
575 pa_assert(!cb || s->set_volume);
577 s->write_volume = cb;
579 /* Save the current flags so we can tell if they've changed */
583 s->flags |= PA_SINK_DEFERRED_VOLUME;
585 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
587 /* If the flags have changed after init, let any clients know via a change event */
588 if (s->state != PA_SINK_INIT && flags != s->flags)
589 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/* Install the implementor's get_mute callback.  (Body elided in this view.) */
592 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
/*
 * Install (or clear) the implementor's set_mute callback and toggle
 * PA_SINK_HW_MUTE_CTRL to match.  Posts a change event if the flags
 * changed after init.  (Some lines elided in this view.)
 */
598 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
599 pa_sink_flags_t flags;
605 /* Save the current flags so we can tell if they've changed */
609 s->flags |= PA_SINK_HW_MUTE_CTRL;
611 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
613 /* If the flags have changed after init, let any clients know via a change event */
614 if (s->state != PA_SINK_INIT && flags != s->flags)
615 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/*
 * Enable/disable PA_SINK_FLAT_VOLUME on the sink.  The request is ANDed
 * with the global core->flat_volumes preference, so a user-wide "off"
 * always wins.  Posts a change event if the flags changed after init.
 * (Some lines elided in this view.)
 */
618 static void enable_flat_volume(pa_sink *s, bool enable) {
619 pa_sink_flags_t flags;
623 /* Always follow the overall user preference here */
624 enable = enable && s->core->flat_volumes;
626 /* Save the current flags so we can tell if they've changed */
630 s->flags |= PA_SINK_FLAT_VOLUME;
632 s->flags &= ~PA_SINK_FLAT_VOLUME;
634 /* If the flags have changed after init, let any clients know via a change event */
635 if (s->state != PA_SINK_INIT && flags != s->flags)
636 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
/*
 * Enable/disable PA_SINK_DECIBEL_VOLUME.  Flat volume is only meaningful
 * with dB volumes, so it is enabled/disabled in lockstep.  Posts a change
 * event if the flags changed after init.  (Some lines elided in this view.)
 */
639 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
640 pa_sink_flags_t flags;
644 /* Save the current flags so we can tell if they've changed */
648 s->flags |= PA_SINK_DECIBEL_VOLUME;
649 enable_flat_volume(s, true);
651 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
652 enable_flat_volume(s, false);
655 /* If the flags have changed after init, let any clients know via a change event */
656 if (s->state != PA_SINK_INIT && flags != s->flags)
657 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
660 /* Called from main context */
/*
 * Finish sink initialization started by pa_sink_new(): verify the flag /
 * callback invariants, propagate volumes from the master sink when volume
 * sharing is enabled, transition out of PA_SINK_INIT (to SUSPENDED or IDLE),
 * publish the monitor source, and fire the SINK_PUT hook + NEW event.
 * (Some lines elided in this view.)
 */
661 void pa_sink_put(pa_sink* s) {
662 pa_sink_assert_ref(s);
663 pa_assert_ctl_context();
665 pa_assert(s->state == PA_SINK_INIT);
666 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || s->input_to_master);
668 /* The following fields must be initialized properly when calling _put() */
669 pa_assert(s->asyncmsgq);
670 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
672 /* Generally, flags should be initialized via pa_sink_new(). As a
673 * special exception we allow some volume related flags to be set
674 * between _new() and _put() by the callback setter functions above.
676 * Thus we implement a couple safeguards here which ensure the above
677 * setters were used (or at least the implementor made manual changes
678 * in a compatible way).
680 * Note: All of these flags set here can change over the life time
682 pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
683 pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
684 pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
686 /* XXX: Currently decibel volume is disabled for all sinks that use volume
687 * sharing. When the master sink supports decibel volume, it would be good
688 * to have the flag also in the filter sink, but currently we don't do that
689 * so that the flags of the filter sink never change when it's moved from
690 * a master sink to another. One solution for this problem would be to
691 * remove user-visible volume altogether from filter sinks when volume
692 * sharing is used, but the current approach was easier to implement... */
693 /* We always support decibel volumes in software, otherwise we leave it to
694 * the sink implementor to set this flag as needed.
696 * Note: This flag can also change over the life time of the sink. */
697 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
698 pa_sink_enable_decibel_volume(s, true);
700 /* If the sink implementor support DB volumes by itself, we should always
701 * try and enable flat volumes too */
702 if ((s->flags & PA_SINK_DECIBEL_VOLUME))
703 enable_flat_volume(s, true);
705 if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
706 pa_sink *root_sink = pa_sink_get_master(s);
708 pa_assert(root_sink);
/* Filter sink: mirror the master's volumes, remapped to our channel map. */
710 s->reference_volume = root_sink->reference_volume;
711 pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
713 s->real_volume = root_sink->real_volume;
714 pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
716 /* We assume that if the sink implementor changed the default
717 * volume he did so in real_volume, because that is the usual
718 * place where he is supposed to place his changes. */
719 s->reference_volume = s->real_volume;
721 s->thread_info.soft_volume = s->soft_volume;
722 s->thread_info.soft_muted = s->muted;
723 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
/* Cross-check: the latency flags must agree between sink and monitor source. */
725 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
726 || (s->base_volume == PA_VOLUME_NORM
727 && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
728 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
729 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
730 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
731 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
733 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
734 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
735 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
/* Enter SUSPENDED if a suspend cause is pending, otherwise IDLE. */
737 if (s->suspend_cause)
738 pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED) == 0);
740 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
742 pa_source_put(s->monitor_source);
745 pa_device_port_active_changed(s->active_port, true);
747 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
748 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
751 /* Called from main context */
/*
 * Disconnect the sink from the core: fire UNLINK hooks, unregister the
 * name, drop it from the core/card idxsets, kill all remaining inputs,
 * move to PA_SINK_UNLINKED, and unlink the monitor source.  Idempotent --
 * safe to call more than once.  (Some lines elided in this view.)
 */
752 void pa_sink_unlink(pa_sink* s) {
754 pa_sink_input *i, *j = NULL;
757 pa_assert_ctl_context();
759 /* Please note that pa_sink_unlink() does more than simply
760 * reversing pa_sink_put(). It also undoes the registrations
761 * already done in pa_sink_new()! */
763 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
764 * may be called multiple times on the same sink without bad
767 linked = PA_SINK_IS_LINKED(s->state);
770 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
773 pa_device_port_active_changed(s->active_port, false);
775 if (s->state != PA_SINK_UNLINKED)
776 pa_namereg_unregister(s->core, s->name);
777 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
780 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill every input still attached; each kill removes it from s->inputs. */
782 while ((i = pa_idxset_first(s->inputs, NULL))) {
784 pa_sink_input_kill(i);
789 sink_set_state(s, PA_SINK_UNLINKED);
791 s->state = PA_SINK_UNLINKED;
795 if (s->monitor_source)
796 pa_source_unlink(s->monitor_source);
799 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
800 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
804 /* Called from main context */
/*
 * Destructor installed as parent.parent.free in pa_sink_new(); runs when
 * the last reference is dropped.  Unlinks if still linked, then releases
 * the monitor source, input containers, silence memblock, proplist and
 * port map.  (Some lines, including the dump-file close and final frees,
 * are elided in this view.)
 */
805 static void sink_free(pa_object *o) {
806 pa_sink *s = PA_SINK(o);
809 pa_assert_ctl_context();
810 pa_assert(pa_sink_refcnt(s) == 0);
812 if (PA_SINK_IS_LINKED(s->state))
815 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
817 if (s->monitor_source) {
818 pa_source_unref(s->monitor_source);
819 s->monitor_source = NULL;
822 pa_idxset_free(s->inputs, NULL);
/* Entries in thread_info.inputs are unreffed by the hashmap's free callback. */
823 pa_hashmap_free(s->thread_info.inputs);
825 if (s->silence.memblock)
826 pa_memblock_unref(s->silence.memblock);
832 pa_proplist_free(s->proplist);
835 pa_hashmap_free(s->ports);
838 /* close file for dump pcm */
847 /* Called from main context, and not while the IO thread is active, please */
/* Attach the async message queue used to talk to the IO thread and
 * propagate it to the monitor source.  (Some lines elided in this view.) */
848 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
849 pa_sink_assert_ref(s);
850 pa_assert_ctl_context();
854 if (s->monitor_source)
855 pa_source_set_asyncmsgq(s->monitor_source, q);
858 /* Called from main context, and not while the IO thread is active, please */
/*
 * Change a restricted subset of sink flags (only LATENCY and
 * DYNAMIC_LATENCY are permitted).  Bits in `mask` are replaced by the
 * corresponding bits of `value`.  Changes are logged, broadcast via
 * subscription + FLAGS_CHANGED hook, mirrored onto the monitor source,
 * and propagated recursively to filter sinks layered on top of this one.
 */
859 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
860 pa_sink_flags_t old_flags;
861 pa_sink_input *input;
864 pa_sink_assert_ref(s);
865 pa_assert_ctl_context();
867 /* For now, allow only a minimal set of flags to be changed. */
868 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
870 old_flags = s->flags;
871 s->flags = (s->flags & ~mask) | (value & mask);
873 if (s->flags == old_flags)
876 if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
877 pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
879 if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
880 pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
881 s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
883 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
884 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
/* Translate the sink latency flags to their source equivalents for the monitor. */
886 if (s->monitor_source)
887 pa_source_update_flags(s->monitor_source,
888 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
889 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
890 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
891 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
/* Filter sinks whose origin is an input on this sink inherit the change. */
893 PA_IDXSET_FOREACH(input, s->inputs, idx) {
894 if (input->origin_sink)
895 pa_sink_update_flags(input->origin_sink, mask, value);
899 /* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object used by the IO thread; mirrored to the monitor source. */
900 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
901 pa_sink_assert_ref(s);
902 pa_sink_assert_io_context(s);
904 s->thread_info.rtpoll = p;
906 if (s->monitor_source)
907 pa_source_set_rtpoll(s->monitor_source, p);
910 /* Called from main context */
/* Re-evaluate RUNNING vs IDLE based on whether anything uses the sink.
 * A suspended sink is left alone.  Returns sink_set_state()'s result.
 * (Some lines elided in this view.) */
911 int pa_sink_update_status(pa_sink*s) {
912 pa_sink_assert_ref(s);
913 pa_assert_ctl_context();
914 pa_assert(PA_SINK_IS_LINKED(s->state));
916 if (s->state == PA_SINK_SUSPENDED)
919 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
922 /* Called from any context - must be threadsafe */
/* Atomically flag that the hardware mixer settings need to be re-applied
 * once the mixer becomes accessible again (checked in pa_sink_suspend()). */
923 void pa_sink_set_mixer_dirty(pa_sink *s, bool is_dirty) {
924 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
927 /* Called from main context */
/*
 * Add or remove `cause` from the sink's (and its monitor source's) suspend
 * cause mask and transition the sink state accordingly: SUSPENDED while any
 * cause remains, otherwise RUNNING/IDLE depending on use.  If the mixer was
 * flagged dirty and the session cause has cleared, the active port is
 * re-applied so ALSA mixer settings are refreshed.  Contains Tizen-specific
 * handling of PA_SUSPEND_INTERNAL and optional dump-file closing.
 * Returns sink_set_state()'s result.  (Some lines elided in this view.)
 */
928 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
932 pa_sink_assert_ref(s);
933 pa_assert_ctl_context();
934 pa_assert(PA_SINK_IS_LINKED(s->state));
935 pa_assert(cause != 0);
938 s->suspend_cause |= cause;
939 s->monitor_source->suspend_cause |= cause;
941 s->suspend_cause &= ~cause;
942 s->monitor_source->suspend_cause &= ~cause;
945 if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
946 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
947 it'll be handled just fine. */
948 pa_sink_set_mixer_dirty(s, false);
949 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
950 if (s->active_port && s->set_port) {
951 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
/* Deferred volume: the port switch must happen in the IO thread. */
952 struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
953 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
956 s->set_port(s, s->active_port);
/* Already in the desired suspend state -- nothing more to do. */
966 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause) {
968 if (cause == PA_SUSPEND_INTERNAL) {
969 /* Clear suspend by switch after manual suspend */
970 s->suspend_cause &= ~cause;
976 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
979 #ifdef PA_DUMP_SINK_FOR_EACH_SUSPEND
980 /* close file for dump pcm */
981 if (suspend && s->dump_in_fp) {
982 fclose(s->dump_in_fp);
983 s->dump_in_fp = NULL;
985 if (suspend && s->dump_out_fp) {
986 fclose(s->dump_out_fp);
987 s->dump_out_fp = NULL;
991 if (s->suspend_cause) {
992 ret = sink_set_state(s, PA_SINK_SUSPENDED);
993 if (ret == 0 && cause == PA_SUSPEND_INTERNAL) {
994 /* Clear suspend by switch after manual suspend */
995 s->suspend_cause &= ~cause;
1000 if (s->suspend_cause)
1001 return sink_set_state(s, PA_SINK_SUSPENDED);
1003 #endif /* __TIZEN__ */
1004 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
1007 /* Called from main context */
/*
 * Begin moving every input away from this sink: each input that accepts
 * pa_sink_input_start_move() is pushed (with a reference held) onto queue
 * `q`, which is returned for a later pa_sink_move_all_finish()/_fail().
 * The idxset is walked with an explicit next-pointer because inputs may be
 * removed from it while iterating.  (Some lines elided in this view.)
 */
1008 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
1009 pa_sink_input *i, *n;
1012 pa_sink_assert_ref(s);
1013 pa_assert_ctl_context();
1014 pa_assert(PA_SINK_IS_LINKED(s->state));
1019 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
1020 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
1022 pa_sink_input_ref(i);
1024 if (pa_sink_input_start_move(i) >= 0)
1025 pa_queue_push(q, i);
/* Move refused: drop the extra reference we just took. */
1027 pa_sink_input_unref(i);
1033 /* Called from main context */
/*
 * Complete a move started by pa_sink_move_all_start(): reattach every
 * queued input to sink `s` (falling back to pa_sink_input_fail_move() if
 * finishing fails), release the reference taken at start, and free the
 * queue.  `save` is forwarded to finish_move as the save/restore hint.
 */
1034 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
1037 pa_sink_assert_ref(s);
1038 pa_assert_ctl_context();
1039 pa_assert(PA_SINK_IS_LINKED(s->state));
1042 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1043 if (pa_sink_input_finish_move(i, s, save) < 0)
1044 pa_sink_input_fail_move(i);
1046 pa_sink_input_unref(i);
1049 pa_queue_free(q, NULL);
1052 /* Called from main context */
/* Abort a move started by pa_sink_move_all_start(): fail each queued
 * input, drop its reference, and free the queue. */
1053 void pa_sink_move_all_fail(pa_queue *q) {
1056 pa_assert_ctl_context();
1059 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
1060 pa_sink_input_fail_move(i);
1061 pa_sink_input_unref(i);
1064 pa_queue_free(q, NULL);
1067 /* Called from IO thread context */
/*
 * Inspect all attached inputs for recent underruns and compute how much of
 * `left_to_play` is still valid.  For an input whose underrun extends past
 * the playable region, pa_sink_input_process_underrun() decides whether the
 * data can be dropped; otherwise the largest underrun seen is accumulated
 * in `result`.  Returns the adjusted number of playable bytes
 * (left_to_play - result).  (Some lines elided in this view.)
 */
1068 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
1073 pa_sink_assert_ref(s);
1074 pa_sink_assert_io_context(s);
1076 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1077 size_t uf = i->thread_info.underrun_for_sink;
1080 if (uf >= left_to_play) {
1081 if (pa_sink_input_process_underrun(i))
1084 else if (uf > result)
/* NOTE(review): "%ld"/(long) casts for size_t would be "%zu" in modern C. */
1089 pa_log_debug("Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", (long) result, (long) left_to_play - result);
1090 return left_to_play - result;
1093 /* Called from IO thread context */
/*
 * Execute a rewind of `nbytes` on the sink: reset the pending-rewind
 * bookkeeping, rewind the deferred-volume change queue, seek the Tizen PCM
 * dump file backwards to stay in sync, then propagate the rewind to every
 * attached input and to the monitor source.  Skipped entirely when no
 * rewind was requested and nbytes is 0.  (Some lines elided in this view.)
 */
1094 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1098 pa_sink_assert_ref(s);
1099 pa_sink_assert_io_context(s);
1100 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1102 /* If nobody requested this and this is actually no real rewind
1103 * then we can short cut this. Please note that this means that
1104 * not all rewind requests triggered upstream will always be
1105 * translated in actual requests! */
1106 if (!s->thread_info.rewind_requested && nbytes <= 0)
1109 s->thread_info.rewind_nbytes = 0;
1110 s->thread_info.rewind_requested = false;
1113 pa_log_debug("Processing rewind...");
1114 if (s->flags & PA_SINK_DEFERRED_VOLUME)
1115 pa_sink_volume_change_rewind(s, nbytes);
/* Keep the debug PCM dump consistent with the rewound playback position. */
1119 fseeko(s->dump_fp, (off_t)nbytes * (-1), SEEK_CUR);
1124 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1125 pa_sink_input_assert_ref(i);
1126 pa_sink_input_process_rewind(i, nbytes);
1130 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1131 pa_source_process_rewind(s->monitor_source, nbytes);
1135 /* Called from IO thread context */
/*
 * Collect up to `maxinfo` playable chunks from the sink's inputs into the
 * pa_mix_info array `info` for mixing.  Each entry holds a peeked chunk,
 * the input's volume, and a reference to the input in `userdata`.  Pure
 * silence chunks are dropped early.  On return, *length is clamped to the
 * shortest chunk gathered (mixlength).  Returns the number of entries
 * filled (return statement elided in this view).
 */
1136 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1140 size_t mixlength = *length;
1142 pa_sink_assert_ref(s);
1143 pa_sink_assert_io_context(s);
1146 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1147 pa_sink_input_assert_ref(i);
1149 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
1151 if (mixlength == 0 || info->chunk.length < mixlength)
1152 mixlength = info->chunk.length;
/* Silence contributes nothing to the mix -- release it and move on. */
1154 if (pa_memblock_is_silence(info->chunk.memblock)) {
1155 pa_memblock_unref(info->chunk.memblock);
/* Keep the input alive while its chunk sits in the mix array. */
1159 info->userdata = pa_sink_input_ref(i);
1161 pa_assert(info->chunk.memblock);
1162 pa_assert(info->chunk.length > 0);
1170 *length = mixlength;
1175 /* Called from IO thread context */
/*
 * After mixing: advance every input by result->length bytes, feed per-input
 * data (with the input's own volume applied) to any direct outputs on the
 * monitor source, and drop the references/memblocks taken by
 * fill_mix_info().  Entries in `info` that no longer correspond to a live
 * input are unreffed in a final sweep.  Finally the mixed `result` is
 * posted to the monitor source.  (Some lines elided in this view.)
 */
1176 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1180 unsigned n_unreffed = 0;
1182 pa_sink_assert_ref(s);
1183 pa_sink_assert_io_context(s);
1185 pa_assert(result->memblock);
1186 pa_assert(result->length > 0);
1188 /* We optimize for the case where the order of the inputs has not changed */
1190 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1192 pa_mix_info* m = NULL;
1194 pa_sink_input_assert_ref(i);
1196 /* Let's try to find the matching entry info the pa_mix_info array */
1197 for (j = 0; j < n; j ++) {
1199 if (info[p].userdata == i) {
1209 /* Drop read data */
1210 pa_sink_input_drop(i, result->length);
1212 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
1214 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1215 void *ostate = NULL;
1216 pa_source_output *o;
/* Direct outputs get this input's own chunk with its volume applied,
 * not the fully mixed result. */
1219 if (m && m->chunk.memblock) {
1221 pa_memblock_ref(c.memblock);
1222 pa_assert(result->length <= c.length);
1223 c.length = result->length;
1225 pa_memchunk_make_writable(&c, 0);
1226 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
/* No chunk from this input (e.g. silence) -- fall back path (elided). */
1229 pa_memblock_ref(c.memblock);
1230 pa_assert(result->length <= c.length);
1231 c.length = result->length;
1234 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1235 pa_source_output_assert_ref(o);
1236 pa_assert(o->direct_on_input == i);
1237 pa_source_post_direct(s->monitor_source, o, &c);
1240 pa_memblock_unref(c.memblock);
/* Release the chunk and input reference held by the mix entry. */
1245 if (m->chunk.memblock) {
1246 pa_memblock_unref(m->chunk.memblock);
1247 pa_memchunk_reset(&m->chunk);
1250 pa_sink_input_unref(m->userdata);
1257 /* Now drop references to entries that are included in the
1258 * pa_mix_info array but don't exist anymore */
1260 if (n_unreffed < n) {
1261 for (; n > 0; info++, n--) {
1263 pa_sink_input_unref(info->userdata);
1264 if (info->chunk.memblock)
1265 pa_memblock_unref(info->chunk.memblock);
/* Finally: hand the mixed data to the monitor source. */
1269 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1270 pa_source_post(s->monitor_source, result);
1273 /* Called from IO thread context */
/* Renders up to 'length' bytes of audio into a freshly referenced memchunk
 * 'result'.  Fast paths: a suspended sink yields silence; zero inputs yield
 * silence; exactly one input is passed through with soft volume / ramp
 * applied in place; otherwise all inputs are mixed via pa_mix(). */
1274 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1275 pa_mix_info info[MAX_MIX_CHANNELS];
1277 size_t block_size_max;
1279 pa_sink_assert_ref(s);
1280 pa_sink_assert_io_context(s);
1281 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1282 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1285 pa_assert(!s->thread_info.rewind_requested);
1286 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: hand out (a slice of) the cached silence block. */
1288 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1289 result->memblock = pa_memblock_ref(s->silence.memblock);
1290 result->index = s->silence.index;
1291 result->length = PA_MIN(s->silence.length, length);
/* Presumably only reached when length == 0 — pick a frame-aligned default
 * buffer size (elided condition above; confirm against full source). */
1298 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
/* Never render more than one mempool block can hold. */
1300 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1301 if (length > block_size_max)
1302 length = pa_frame_align(block_size_max, &s->sample_spec);
1304 pa_assert(length > 0);
1306 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* No inputs: return silence. */
1310 *result = s->silence;
1311 pa_memblock_ref(result->memblock);
1313 if (result->length > length)
1314 result->length = length;
1316 } else if (n == 1) {
/* Single input: avoid mixing, reuse its chunk directly. */
1320 *result = info[0].chunk;
1321 pa_memblock_ref(result->memblock);
1323 if (result->length > length)
1324 result->length = length;
/* Combined volume = sink soft volume x stream volume. */
1326 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1328 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1329 pa_memblock_unref(result->memblock);
1330 pa_silence_memchunk_get(&s->core->silence_cache,
1335 } else if (!pa_cvolume_is_norm(&volume) || pa_cvolume_ramp_target_active(&s->thread_info.ramp) || pa_cvolume_ramp_active(&s->thread_info.ramp)) {
/* Volume and/or ramp must be applied in place, so we need a private,
 * writable copy of the chunk. */
1336 pa_memchunk_make_writable(result, 0);
1337 if (pa_cvolume_ramp_active(&s->thread_info.ramp)) {
1338 if (!pa_cvolume_is_norm(&volume))
1339 pa_volume_memchunk(result, &s->sample_spec, &volume);
1340 pa_volume_ramp_memchunk(result, &s->sample_spec, &(s->thread_info.ramp));
/* Ramp finished: fold its target volume into the applied volume. */
1343 if (pa_cvolume_ramp_target_active(&s->thread_info.ramp)) {
1344 pa_cvolume_ramp_get_targets(&s->thread_info.ramp, &target);
1345 pa_sw_cvolume_multiply(&volume, &volume, &target);
1347 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* General case: mix n inputs into a fresh memblock. */
1352 pa_cvolume target_vol;
1354 result->memblock = pa_memblock_new(s->core->mempool, length);
1356 ptr = pa_memblock_acquire(result->memblock);
1357 result->length = pa_mix(info, n,
1360 &s->thread_info.soft_volume,
1361 s->thread_info.soft_muted);
/* Apply volume ramping to the mixed buffer, if one is in flight. */
1363 if (pa_cvolume_ramp_target_active(&s->thread_info.ramp) || pa_cvolume_ramp_active(&s->thread_info.ramp)) {
1364 if (pa_cvolume_ramp_active(&s->thread_info.ramp))
1365 pa_volume_ramp_memchunk(result, &s->sample_spec, &(s->thread_info.ramp));
1367 pa_cvolume_ramp_get_targets(&s->thread_info.ramp, &target_vol);
1368 pa_volume_memchunk(result, &s->sample_spec, &target_vol);
1372 pa_memblock_release(result->memblock);
/* Advance inputs, feed the monitor, release mix references. */
1377 inputs_drop(s, info, n, result);
/* Debug: append the rendered data to the sink dump file. */
1380 __toggle_open_close_n_write_dump(s, result);
1386 /* Called from IO thread context */
/* Like pa_sink_render(), but renders into the caller-supplied memchunk
 * 'target' instead of allocating one.  May shorten target->length if fewer
 * bytes were available; same fast paths (suspended / 0 inputs / 1 input). */
1387 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1388 pa_mix_info info[MAX_MIX_CHANNELS];
1390 size_t length, block_size_max;
1392 pa_sink_assert_ref(s);
1393 pa_sink_assert_io_context(s);
1394 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1396 pa_assert(target->memblock);
1397 pa_assert(target->length > 0);
1398 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1400 pa_assert(!s->thread_info.rewind_requested);
1401 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: just fill the target with silence. */
1403 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1404 pa_silence_memchunk(target, &s->sample_spec);
/* Never render more than one mempool block can hold. */
1410 length = target->length;
1411 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1412 if (length > block_size_max)
1413 length = pa_frame_align(block_size_max, &s->sample_spec);
1415 pa_assert(length > 0);
1417 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* No inputs: clamp and silence the target. */
1420 if (target->length > length)
1421 target->length = length;
1423 pa_silence_memchunk(target, &s->sample_spec);
1424 } else if (n == 1) {
/* Single input: copy its chunk into the target, volume/ramp applied. */
1427 if (target->length > length)
1428 target->length = length;
/* Combined volume = sink soft volume x stream volume. */
1430 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1432 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1433 pa_silence_memchunk(target, &s->sample_spec);
1436 pa_cvolume target_vol;
/* Work on a private reference of the input's chunk. */
1438 vchunk = info[0].chunk;
1439 pa_memblock_ref(vchunk.memblock);
1441 if (vchunk.length > length)
1442 vchunk.length = length;
1444 if (!pa_cvolume_is_norm(&volume) || pa_cvolume_ramp_target_active(&s->thread_info.ramp) || pa_cvolume_ramp_active(&s->thread_info.ramp)) {
1445 pa_memchunk_make_writable(&vchunk, 0);
1446 if (pa_cvolume_ramp_active(&s->thread_info.ramp)) {
1447 if (!pa_cvolume_is_norm(&volume))
1448 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1449 pa_volume_ramp_memchunk(&vchunk, &s->sample_spec, &(s->thread_info.ramp));
/* Ramp finished: fold its target volume into the applied volume. */
1452 if (pa_cvolume_ramp_target_active(&s->thread_info.ramp)) {
1453 pa_cvolume_ramp_get_targets(&s->thread_info.ramp, &target_vol);
1454 pa_sw_cvolume_multiply(&volume, &volume, &target_vol);
1456 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1460 pa_memchunk_memcpy(target, &vchunk);
1461 pa_memblock_unref(vchunk.memblock);
/* General case: mix all inputs straight into the target buffer. */
1466 pa_cvolume target_vol;
1468 ptr = pa_memblock_acquire(target->memblock);
1470 target->length = pa_mix(info, n,
1471 (uint8_t*) ptr + target->index, length,
1473 &s->thread_info.soft_volume,
1474 s->thread_info.soft_muted);
/* Apply volume ramping to the mixed buffer, if one is in flight. */
1476 if (pa_cvolume_ramp_target_active(&s->thread_info.ramp) || pa_cvolume_ramp_active(&s->thread_info.ramp)) {
1477 if (pa_cvolume_ramp_active(&s->thread_info.ramp))
1478 pa_volume_ramp_memchunk(target, &s->sample_spec, &(s->thread_info.ramp));
1480 pa_cvolume_ramp_get_targets(&s->thread_info.ramp, &target_vol);
1481 pa_volume_memchunk(target, &s->sample_spec, &target_vol);
1485 pa_memblock_release(target->memblock);
/* Advance inputs, feed the monitor, release mix references. */
1488 inputs_drop(s, info, n, target);
/* Debug: append the rendered data to the sink dump file. */
1491 __toggle_open_close_n_write_dump(s, target);
1496 /* Called from IO thread context */
/* Like pa_sink_render_into(), but guarantees that the whole of 'target' is
 * filled — presumably by calling pa_sink_render_into() repeatedly on the
 * remaining tail (the loop itself is elided from this view). */
1497 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1501 pa_sink_assert_ref(s);
1502 pa_sink_assert_io_context(s);
1503 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1505 pa_assert(target->memblock);
1506 pa_assert(target->length > 0);
1507 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1509 pa_assert(!s->thread_info.rewind_requested);
1510 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: a single silence fill covers the whole target. */
1512 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1513 pa_silence_memchunk(target, &s->sample_spec);
1526 pa_sink_render_into(s, &chunk);
1535 /* Called from IO thread context */
/* Renders exactly 'length' bytes into 'result': first a normal render, then
 * — if that produced less than requested — fills the remaining tail of the
 * (now writable) block via pa_sink_render_into_full(). */
1536 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1537 pa_sink_assert_ref(s);
1538 pa_sink_assert_io_context(s);
1539 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1540 pa_assert(length > 0);
1541 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1544 pa_assert(!s->thread_info.rewind_requested);
1545 pa_assert(s->thread_info.rewind_nbytes == 0);
1549 pa_sink_render(s, length, result);
1551 if (result->length < length) {
/* Grow the chunk to the full requested size and render the rest into
 * the gap after the data we already have. */
1554 pa_memchunk_make_writable(result, length);
1556 chunk.memblock = result->memblock;
1557 chunk.index = result->index + result->length;
1558 chunk.length = length - result->length;
1560 pa_sink_render_into_full(s, &chunk);
1562 result->length = length;
1568 /* Called from main thread */
/* Tries to switch the sink's sample rate to 'rate' (or the closest of the
 * configured default/alternate rates when not in passthrough mode).  Bails
 * out if the sink or its monitor is running, suspends the sink around the
 * actual switch, and propagates the new rate to the monitor source and to
 * corked inputs.  Return value semantics are partly elided from this view. */
1569 int pa_sink_update_rate(pa_sink *s, uint32_t rate, bool passthrough) {
1571 uint32_t desired_rate = rate;
1572 uint32_t default_rate = s->default_sample_rate;
1573 uint32_t alternate_rate = s->alternate_sample_rate;
1576 bool use_alternate = false;
/* Already at the requested rate: nothing to do. */
1578 if (rate == s->sample_spec.rate)
/* Sink implementation does not support rate switching. */
1581 if (!s->update_rate)
1584 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough)) {
1585 pa_log_debug("Default and alternate sample rates are the same.");
/* Rate can only be changed while nothing is actively streaming. */
1589 if (PA_SINK_IS_RUNNING(s->state)) {
1590 pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
1591 s->sample_spec.rate);
1595 if (s->monitor_source) {
1596 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1597 pa_log_info("Cannot update rate, monitor source is RUNNING");
1602 if (PA_UNLIKELY(!pa_sample_rate_valid(desired_rate)))
/* Both configured rates must be from the 8kHz (4000-multiple) or the
 * 44.1kHz (11025-multiple) family. */
1606 pa_assert((default_rate % 4000 == 0) || (default_rate % 11025 == 0));
1607 pa_assert((alternate_rate % 4000 == 0) || (alternate_rate % 11025 == 0));
/* Prefer whichever of default/alternate shares the requested rate's
 * family, to keep resampling ratios simple. */
1609 if (default_rate % 11025 == 0) {
1610 if ((alternate_rate % 4000 == 0) && (desired_rate % 4000 == 0))
1613 /* default is 4000 multiple */
1614 if ((alternate_rate % 11025 == 0) && (desired_rate % 11025 == 0))
1619 desired_rate = alternate_rate;
1621 desired_rate = default_rate;
1623 desired_rate = rate; /* use stream sampling rate, discard default/alternate settings */
1626 if (desired_rate == s->sample_spec.rate)
/* Non-passthrough rate changes are only safe with no active users. */
1629 if (!passthrough && pa_sink_used_by(s) > 0)
1632 pa_log_debug("Suspending sink %s due to changing the sample rate.", s->name);
1633 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1635 if (s->update_rate(s, desired_rate) >= 0) {
1636 /* update monitor source as well */
1637 if (s->monitor_source && !passthrough)
1638 pa_source_update_rate(s->monitor_source, desired_rate, false);
1639 pa_log_info("Changed sampling rate successfully");
/* Corked inputs won't renegotiate on their own — update them now. */
1641 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1642 if (i->state == PA_SINK_INPUT_CORKED)
1643 pa_sink_input_update_rate(i);
1649 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1654 /* Called from main thread */
/* Returns the sink's current playback latency, queried synchronously from
 * the IO thread, with the configurable latency offset applied. */
1655 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1658 pa_sink_assert_ref(s);
1659 pa_assert_ctl_context();
1660 pa_assert(PA_SINK_IS_LINKED(s->state));
1662 /* The returned value is supposed to be in the time domain of the sound card! */
1664 if (s->state == PA_SINK_SUSPENDED)
1667 if (!(s->flags & PA_SINK_LATENCY))
/* Synchronous round-trip to the IO thread for the actual figure. */
1670 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1672 /* usec is unsigned, so check that the offset can be added to usec without
1674 if (-s->latency_offset <= (int64_t) usec)
1675 usec += s->latency_offset;
1682 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): queries the latency by
 * invoking the message handler directly (no cross-thread round-trip) and
 * applies the thread-local latency offset. */
1683 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1687 pa_sink_assert_ref(s);
1688 pa_sink_assert_io_context(s);
1689 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1691 /* The returned value is supposed to be in the time domain of the sound card! */
1693 if (s->thread_info.state == PA_SINK_SUSPENDED)
1696 if (!(s->flags & PA_SINK_LATENCY))
1699 o = PA_MSGOBJECT(s);
1701 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
/* Call the handler in-thread; same message the main thread would send. */
1703 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1706 /* usec is unsigned, so check that the offset can be added to usec without
1708 if (-s->thread_info.latency_offset <= (int64_t) usec)
1709 usec += s->thread_info.latency_offset;
1716 /* Called from the main thread (and also from the IO thread while the main
1717 * thread is waiting).
1719 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1720 * set. Instead, flat volume mode is detected by checking whether the root sink
1721 * has the flag set. */
/* Returns true if flat volume mode is in effect for this sink (checked on
 * its volume-sharing root). */
1722 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1723 pa_sink_assert_ref(s);
/* Resolve volume sharing: the flag lives on the root sink only. */
1725 s = pa_sink_get_master(s);
1728 return (s->flags & PA_SINK_FLAT_VOLUME);
1733 /* Called from the main thread (and also from the IO thread while the main
1734 * thread is waiting). */
/* Walks up the chain of volume-sharing filter sinks and returns the root
 * sink (the one that actually owns the volume). */
1735 pa_sink *pa_sink_get_master(pa_sink *s) {
1736 pa_sink_assert_ref(s);
1738 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* A filter sink that is not connected yet has no master to follow. */
1739 if (PA_UNLIKELY(!s->input_to_master))
1742 s = s->input_to_master->sink;
1748 /* Called from main context */
/* Returns true if the sink currently carries a passthrough stream.  Since
 * a passthrough stream must be the sink's only input, it suffices to check
 * a single-element input set. */
1749 bool pa_sink_is_passthrough(pa_sink *s) {
1750 pa_sink_input *alt_i;
1753 pa_sink_assert_ref(s);
1755 /* one and only one PASSTHROUGH input can possibly be connected */
1756 if (pa_idxset_size(s->inputs) == 1) {
1757 alt_i = pa_idxset_first(s->inputs, &idx);
1759 if (pa_sink_input_is_passthrough(alt_i))
1766 /* Called from main context */
/* Puts the sink into passthrough mode: suspends the monitor source (the
 * compressed stream must not be monitored) and forces the volume to 0 dB,
 * saving the previous volume for pa_sink_leave_passthrough(). */
1767 void pa_sink_enter_passthrough(pa_sink *s) {
1770 /* disable the monitor in passthrough mode */
1771 if (s->monitor_source) {
1772 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1773 pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1776 /* set the volume to NORM */
1777 s->saved_volume = *pa_sink_get_volume(s, true);
1778 s->saved_save_volume = s->save_volume;
/* Cap at base_volume so hardware amplification is not engaged. */
1780 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1781 pa_sink_set_volume(s, &volume, true, false);
1784 /* Called from main context */
/* Reverses pa_sink_enter_passthrough(): resumes the monitor source and
 * restores the volume (and its save flag) captured when passthrough mode
 * was entered. */
1785 void pa_sink_leave_passthrough(pa_sink *s) {
1786 /* Unsuspend monitor */
1787 if (s->monitor_source) {
1788 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1789 pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1792 /* Restore sink volume to what it was before we entered passthrough mode */
1793 pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
/* Clear the saved state so it cannot be restored twice. */
1795 pa_cvolume_init(&s->saved_volume);
1796 s->saved_save_volume = false;
1799 /* Called from main context. */
/* Recomputes one input's reference ratio (per-channel quotient of the
 * stream volume and the sink's reference volume) and stores it via
 * pa_sink_input_set_reference_ratio().  Only meaningful in flat volume
 * mode. */
1800 static void compute_reference_ratio(pa_sink_input *i) {
1802 pa_cvolume remapped;
1806 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1809 * Calculates the reference ratio from the sink's reference
1810 * volume. This basically calculates:
1812 * i->reference_ratio = i->volume / i->sink->reference_volume
/* Bring the sink volume into the input's channel map first. */
1815 remapped = i->sink->reference_volume;
1816 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1818 ratio = i->reference_ratio;
1820 for (c = 0; c < i->sample_spec.channels; c++) {
1822 /* We don't update when the sink volume is 0 anyway */
1823 if (remapped.values[c] <= PA_VOLUME_MUTED)
1826 /* Don't update the reference ratio unless necessary */
1827 if (pa_sw_volume_multiply(
1829 remapped.values[c]) == i->volume.values[c])
1832 ratio.values[c] = pa_sw_volume_divide(
1833 i->volume.values[c],
1834 remapped.values[c]);
1837 pa_sink_input_set_reference_ratio(i, &ratio);
1840 /* Called from main context. Only called for the root sink in volume sharing
1841 * cases, except for internal recursive calls. */
/* Recomputes the reference ratio of every input of this sink, recursing
 * into filter sinks that share their volume with this one. */
1842 static void compute_reference_ratios(pa_sink *s) {
1846 pa_sink_assert_ref(s);
1847 pa_assert_ctl_context();
1848 pa_assert(PA_SINK_IS_LINKED(s->state));
1849 pa_assert(pa_sink_flat_volume_enabled(s));
1851 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1852 compute_reference_ratio(i);
/* Descend into volume-sharing filter sinks hanging off this input. */
1854 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1855 compute_reference_ratios(i->origin_sink);
1859 /* Called from main context. Only called for the root sink in volume sharing
1860 * cases, except for internal recursive calls. */
/* Recomputes each input's real ratio (stream volume relative to the sink's
 * real volume) and from it the soft volume actually applied when mixing:
 * soft_volume = real_ratio * volume_factor. */
1861 static void compute_real_ratios(pa_sink *s) {
1865 pa_sink_assert_ref(s);
1866 pa_assert_ctl_context();
1867 pa_assert(PA_SINK_IS_LINKED(s->state));
1868 pa_assert(pa_sink_flat_volume_enabled(s));
1870 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1872 pa_cvolume remapped;
1874 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1875 /* The origin sink uses volume sharing, so this input's real ratio
1876 * is handled as a special case - the real ratio must be 0 dB, and
1877 * as a result i->soft_volume must equal i->volume_factor. */
1878 pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1879 i->soft_volume = i->volume_factor;
1881 compute_real_ratios(i->origin_sink);
1887 * This basically calculates:
1889 * i->real_ratio := i->volume / s->real_volume
1890 * i->soft_volume := i->real_ratio * i->volume_factor
/* Bring the sink's real volume into the input's channel map. */
1893 remapped = s->real_volume;
1894 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1896 i->real_ratio.channels = i->sample_spec.channels;
1897 i->soft_volume.channels = i->sample_spec.channels;
1899 for (c = 0; c < i->sample_spec.channels; c++) {
/* Muted sink channel: force the stream channel silent too; the old
 * real_ratio is kept so it can be restored when unmuted. */
1901 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1902 /* We leave i->real_ratio untouched */
1903 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1907 /* Don't lose accuracy unless necessary */
1908 if (pa_sw_volume_multiply(
1909 i->real_ratio.values[c],
1910 remapped.values[c]) != i->volume.values[c])
1912 i->real_ratio.values[c] = pa_sw_volume_divide(
1913 i->volume.values[c],
1914 remapped.values[c]);
1916 i->soft_volume.values[c] = pa_sw_volume_multiply(
1917 i->real_ratio.values[c],
1918 i->volume_factor.values[c]);
1921 /* We don't copy the soft_volume to the thread_info data
1922 * here. That must be done by the caller */
/* Remaps volume 'v' from channel map 'from' to 'to' while minimizing the
 * impact on other streams; see the in-body comment for the exact rules.
 * Returns 'v' (presumably — the return statements are elided here). */
1926 static pa_cvolume *cvolume_remap_minimal_impact(
1928 const pa_cvolume *template,
1929 const pa_channel_map *from,
1930 const pa_channel_map *to) {
1935 pa_assert(template);
1938 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1939 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1941 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1942 * mapping from sink input to sink volumes:
1944 * If template is a possible remapping from v it is used instead
1945 * of remapping anew.
1947 * If the channel maps don't match we set an all-channel volume on
1948 * the sink to ensure that changing a volume on one stream has no
1949 * effect that cannot be compensated for in another stream that
1950 * does not have the same channel map as the sink. */
/* Identical maps: nothing to remap. */
1952 if (pa_channel_map_equal(from, to))
/* Template round-trips back to v: adopt the template as the result. */
1956 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
/* Fallback: flatten to a uniform volume at v's loudest channel. */
1961 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1965 /* Called from main thread. Only called for the root sink in volume sharing
1966 * cases, except for internal recursive calls. */
/* Merges the volumes of all inputs (recursing through volume-sharing
 * filter sinks) into *max_volume, remapped to 'channel_map' with minimal
 * cross-stream impact.  Used to derive the flat-volume real volume. */
1967 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1971 pa_sink_assert_ref(s);
1972 pa_assert(max_volume);
1973 pa_assert(channel_map);
1974 pa_assert(pa_sink_flat_volume_enabled(s));
1976 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1977 pa_cvolume remapped;
1979 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1980 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1982 /* Ignore this input. The origin sink uses volume sharing, so this
1983 * input's volume will be set to be equal to the root sink's real
1984 * volume. Obviously this input's current volume must not then
1985 * affect what the root sink's real volume will be. */
/* Merge = per-channel maximum, after careful remapping. */
1989 remapped = i->volume;
1990 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1991 pa_cvolume_merge(max_volume, max_volume, &remapped);
1995 /* Called from main thread. Only called for the root sink in volume sharing
1996 * cases, except for internal recursive calls. */
/* Returns whether the sink tree rooted at 's' has at least one "real"
 * input, i.e. one that is not merely a volume-sharing filter sink (those
 * are checked recursively for inputs of their own). */
1997 static bool has_inputs(pa_sink *s) {
2001 pa_sink_assert_ref(s);
2003 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2004 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
2011 /* Called from main thread. Only called for the root sink in volume sharing
2012 * cases, except for internal recursive calls. */
/* Sets s->real_volume to 'new_volume' (remapped into the sink's channel
 * map) and pushes the same volume down into every volume-sharing filter
 * sink in the tree, updating their inputs' volumes and reference ratios
 * along the way. */
2013 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
2017 pa_sink_assert_ref(s);
2018 pa_assert(new_volume);
2019 pa_assert(channel_map);
2021 s->real_volume = *new_volume;
2022 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
2024 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2025 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2026 if (pa_sink_flat_volume_enabled(s)) {
2027 pa_cvolume new_input_volume;
2029 /* Follow the root sink's real volume. */
2030 new_input_volume = *new_volume;
2031 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
2032 pa_sink_input_set_volume_direct(i, &new_input_volume);
2033 compute_reference_ratio(i);
/* Recurse so the filter sink's own inputs are updated too. */
2036 update_real_volume(i->origin_sink, new_volume, channel_map);
2041 /* Called from main thread. Only called for the root sink in shared volume
/* Derives the sink's real volume in flat-volume mode: the per-channel
 * maximum over all stream volumes in the tree.  Afterwards recomputes
 * every input's real ratio / soft volume to compensate. */
2043 static void compute_real_volume(pa_sink *s) {
2044 pa_sink_assert_ref(s);
2045 pa_assert_ctl_context();
2046 pa_assert(PA_SINK_IS_LINKED(s->state));
2047 pa_assert(pa_sink_flat_volume_enabled(s));
2048 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2050 /* This determines the maximum volume of all streams and sets
2051 * s->real_volume accordingly. */
2053 if (!has_inputs(s)) {
2054 /* In the special case that we have no sink inputs we leave the
2055 * volume unmodified. */
2056 update_real_volume(s, &s->reference_volume, &s->channel_map);
/* Start from silence and let the inputs raise the maximum. */
2060 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
2062 /* First let's determine the new maximum volume of all inputs
2063 * connected to this sink */
2064 get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
2065 update_real_volume(s, &s->real_volume, &s->channel_map);
2067 /* Then, let's update the real ratios/soft volumes of all inputs
2068 * connected to this sink */
2069 compute_real_ratios(s);
2072 /* Called from main thread. Only called for the root sink in shared volume
2073 * cases, except for internal recursive calls. */
/* After a sink-initiated reference volume change, recomputes each input's
 * volume as reference_volume * reference_ratio, recursing through
 * volume-sharing filter sinks. */
2074 static void propagate_reference_volume(pa_sink *s) {
2078 pa_sink_assert_ref(s);
2079 pa_assert_ctl_context();
2080 pa_assert(PA_SINK_IS_LINKED(s->state));
2081 pa_assert(pa_sink_flat_volume_enabled(s));
2083 /* This is called whenever the sink volume changes that is not
2084 * caused by a sink input volume change. We need to fix up the
2085 * sink input volumes accordingly */
2087 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2088 pa_cvolume new_volume;
2090 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2091 propagate_reference_volume(i->origin_sink);
2093 /* Since the origin sink uses volume sharing, this input's volume
2094 * needs to be updated to match the root sink's real volume, but
2095 * that will be done later in update_shared_real_volume(). */
2099 /* This basically calculates:
2101 * i->volume := s->reference_volume * i->reference_ratio */
2103 new_volume = s->reference_volume;
2104 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2105 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2106 pa_sink_input_set_volume_direct(i, &new_volume);
2110 /* Called from main thread. Only called for the root sink in volume sharing
2111 * cases, except for internal recursive calls. The return value indicates
2112 * whether any reference volume actually changed. */
/* Sets the sink's reference volume to 'v' (remapped from 'channel_map'),
 * updates the save flag, and recursively mirrors the change onto all
 * volume-sharing filter sinks in the tree. */
2113 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
2115 bool reference_volume_changed;
2119 pa_sink_assert_ref(s);
2120 pa_assert(PA_SINK_IS_LINKED(s->state));
2122 pa_assert(channel_map);
2123 pa_assert(pa_cvolume_valid(v));
2126 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
2128 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
2129 pa_sink_set_reference_volume_direct(s, &volume);
/* An unchanged volume keeps its previous save flag; 'save' can only
 * turn saving on, never off. */
2131 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
2133 if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2134 /* If the root sink's volume doesn't change, then there can't be any
2135 * changes in the other sinks in the sink tree either.
2137 * It's probably theoretically possible that even if the root sink's
2138 * volume changes slightly, some filter sink doesn't change its volume
2139 * due to rounding errors. If that happens, we still want to propagate
2140 * the changed root sink volume to the sinks connected to the
2141 * intermediate sink that didn't change its volume. This theoretical
2142 * possibility is the reason why we have that !(s->flags &
2143 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2144 * notice even if we returned here false always if
2145 * reference_volume_changed is false. */
/* Recurse, but never ask the filter sinks to persist the volume. */
2148 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2149 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2150 update_reference_volume(i->origin_sink, v, channel_map, false);
2156 /* Called from main thread */
/* Public entry point for changing a sink's volume.  With a non-NULL
 * 'volume' sets the reference volume (propagating to inputs in flat-volume
 * mode); with NULL re-synchronizes the sink's reference/real volumes from
 * the current stream volumes.  Refuses changes while a passthrough stream
 * is connected, except for a reset to 0 dB. */
2157 void pa_sink_set_volume(
2159 const pa_cvolume *volume,
2163 pa_cvolume new_reference_volume;
2166 pa_sink_assert_ref(s);
2167 pa_assert_ctl_context();
2168 pa_assert(PA_SINK_IS_LINKED(s->state));
2169 pa_assert(!volume || pa_cvolume_valid(volume));
2170 pa_assert(volume || pa_sink_flat_volume_enabled(s));
2171 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2173 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2174 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2175 if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2176 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
2180 /* In case of volume sharing, the volume is set for the root sink first,
2181 * from which it's then propagated to the sharing sinks. */
2182 root_sink = pa_sink_get_master(s);
2184 if (PA_UNLIKELY(!root_sink))
2187 /* As a special exception we accept mono volumes on all sinks --
2188 * even on those with more complex channel maps */
2191 if (pa_cvolume_compatible(volume, &s->sample_spec))
2192 new_reference_volume = *volume;
/* Mono volume: scale the current per-channel balance to the new level. */
2194 new_reference_volume = s->reference_volume;
2195 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2198 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2200 if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2201 if (pa_sink_flat_volume_enabled(root_sink)) {
2202 /* OK, propagate this volume change back to the inputs */
2203 propagate_reference_volume(root_sink);
2205 /* And now recalculate the real volume */
2206 compute_real_volume(root_sink);
2208 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2212 /* If volume is NULL we synchronize the sink's real and
2213 * reference volumes with the stream volumes. */
2215 pa_assert(pa_sink_flat_volume_enabled(root_sink));
2217 /* Ok, let's determine the new real volume */
2218 compute_real_volume(root_sink);
2220 /* Let's 'push' the reference volume if necessary */
2221 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2222 /* If the sink and it's root don't have the same number of channels, we need to remap */
2223 if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2224 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2225 update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2227 /* Now that the reference volume is updated, we can update the streams'
2228 * reference ratios. */
2229 compute_reference_ratios(root_sink);
2232 if (root_sink->set_volume) {
2233 /* If we have a function set_volume(), then we do not apply a
2234 * soft volume by default. However, set_volume() is free to
2235 * apply one to root_sink->soft_volume */
2237 pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
/* With deferred volume the callback runs in the IO thread instead. */
2238 if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2239 root_sink->set_volume(root_sink);
2242 /* If we have no function set_volume(), then the soft volume
2243 * becomes the real volume */
2244 root_sink->soft_volume = root_sink->real_volume;
2246 /* This tells the sink that soft volume and/or real volume changed */
2248 pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2251 /* Called from main thread */
/* Starts a volume ramp on the sink: converts the requested ramp into the
 * sink's sample-rate domain and notifies the IO thread.  Rejected while a
 * passthrough stream is connected. */
2252 void pa_sink_set_volume_ramp(
2254 const pa_cvolume_ramp *ramp,
2258 pa_sink_assert_ref(s);
2259 pa_assert_ctl_context();
2260 pa_assert(PA_SINK_IS_LINKED(s->state));
2263 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2264 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2265 if (pa_sink_is_passthrough(s)) {
2266 pa_log_warn("Cannot do volume ramp, Sink is connected to PASSTHROUGH input");
/* Translate the ramp duration into per-sample steps for this rate. */
2270 pa_cvolume_ramp_convert(ramp, &s->ramp, s->sample_spec.rate);
2272 /* This tells the sink that volume ramp changed */
2274 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_RAMP, NULL, 0, NULL) == 0);
2277 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2278 * Only to be called by sink implementor */
/* Sets the sink's soft (software-applied) volume; a NULL 'volume' resets
 * it to norm (the reset path's condition is elided from this view).  For
 * non-deferred-volume sinks the change is pushed to the IO thread
 * synchronously. */
2279 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2281 pa_sink_assert_ref(s);
2282 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* Deferred volume runs in the IO thread; otherwise we must be in the
 * main (control) thread. */
2284 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2285 pa_sink_assert_io_context(s);
2287 pa_assert_ctl_context();
2290 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2292 s->soft_volume = *volume;
2294 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2295 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
/* Not linked (or deferred): update the thread copy directly. */
2297 s->thread_info.soft_volume = s->soft_volume;
2300 /* Called from the main thread. Only called for the root sink in volume sharing
2301 * cases, except for internal recursive calls. */
/* Handles an externally triggered hardware volume change: adopts the new
 * real volume as the reference volume and rebuilds the stream volumes from
 * the (unchanged) real ratios.  Marks the volume for saving, since such
 * changes are almost certainly user-initiated. */
2302 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2306 pa_sink_assert_ref(s);
2307 pa_assert(old_real_volume);
2308 pa_assert_ctl_context();
2309 pa_assert(PA_SINK_IS_LINKED(s->state));
2311 /* This is called when the hardware's real volume changes due to
2312 * some external event. We copy the real volume into our
2313 * reference volume and then rebuild the stream volumes based on
2314 * i->real_ratio which should stay fixed. */
2316 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
/* No actual change: nothing to propagate. */
2317 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2320 /* 1. Make the real volume the reference volume */
2321 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2324 if (pa_sink_flat_volume_enabled(s)) {
2326 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2327 pa_cvolume new_volume;
2329 /* 2. Since the sink's reference and real volumes are equal
2330 * now our ratios should be too. */
2331 pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2333 /* 3. Recalculate the new stream reference volume based on the
2334 * reference ratio and the sink's reference volume.
2336 * This basically calculates:
2338 * i->volume = s->reference_volume * i->reference_ratio
2340 * This is identical to propagate_reference_volume() */
2341 new_volume = s->reference_volume;
2342 pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2343 pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2344 pa_sink_input_set_volume_direct(i, &new_volume);
/* Descend into volume-sharing filter sinks. */
2346 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2347 propagate_real_volume(i->origin_sink, old_real_volume);
2351 /* Something got changed in the hardware. It probably makes sense
2352 * to save changed hw settings given that hw volume changes not
2353 * triggered by PA are almost certainly done by the user. */
2354 if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2355 s->save_volume = true;
2358 /* Called from io thread */
/* Ask the main thread to re-read this sink's volume and mute state by
 * posting PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE to the control-side
 * queue. Fire-and-forget: no reply is awaited in the IO thread. */
2359 void pa_sink_update_volume_and_mute(pa_sink *s) {
2361 pa_sink_assert_io_context(s);
2363 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2366 /* Called from main thread */
/* Return the sink's reference volume. When refresh_volume is set or
 * force_refresh is requested, first re-read the real volume from the
 * driver (directly via s->get_volume, or via a synchronous GET_VOLUME
 * message for deferred-volume sinks — the branch structure between the
 * sampled lines is not fully visible) and propagate any change. */
2367 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
2368 pa_sink_assert_ref(s);
2369 pa_assert_ctl_context();
2370 pa_assert(PA_SINK_IS_LINKED(s->state));
2372 if (s->refresh_volume || force_refresh) {
2373 struct pa_cvolume old_real_volume;
2375 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
/* Remember the previous value so propagate_real_volume() can detect
 * whether the hardware volume actually changed. */
2377 old_real_volume = s->real_volume;
2379 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2382 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2384 update_real_volume(s, &s->real_volume, &s->channel_map);
2385 propagate_real_volume(s, &old_real_volume);
2388 return &s->reference_volume;
2391 /* Called from main thread. In volume sharing cases, only the root sink may
/* Entry point for sink implementors to report that the hardware volume
 * changed behind PulseAudio's back: record the new real volume and
 * propagate it to reference volume and streams. */
2393 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2394 pa_cvolume old_real_volume;
2396 pa_sink_assert_ref(s);
2397 pa_assert_ctl_context();
2398 pa_assert(PA_SINK_IS_LINKED(s->state));
2399 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2401 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2403 old_real_volume = s->real_volume;
2404 update_real_volume(s, new_real_volume, &s->channel_map);
2405 propagate_real_volume(s, &old_real_volume);
2408 /* Called from main thread */
/* Change the sink's mute state. 'save' marks whether the new state
 * should be persisted. Drivers with a direct set_mute callback (and no
 * deferred volume) are invoked here; the IO thread is then informed via
 * a synchronous SET_MUTE message and listeners are notified.
 * NOTE(review): lossy extraction — the early return on unchanged mute,
 * the assignment of s->muted, and the set_mute() invocation between the
 * sampled lines are not visible here. */
2409 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2412 pa_sink_assert_ref(s);
2413 pa_assert_ctl_context();
2415 old_muted = s->muted;
/* If nothing changes, still upgrade the save flag if requested. */
2417 if (mute == old_muted) {
2418 s->save_muted |= save;
2423 s->save_muted = save;
/* Guard driver callbacks with set_mute_in_progress so that
 * pa_sink_mute_changed() can ignore re-entrant notifications. */
2425 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
2426 s->set_mute_in_progress = true;
2428 s->set_mute_in_progress = false;
2431 if (!PA_SINK_IS_LINKED(s->state))
2434 pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
2435 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2436 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2437 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2440 /* Called from main thread */
/* Return the sink's mute state, optionally refreshing it first from the
 * driver: via a synchronous GET_MUTE message for deferred-volume sinks,
 * or by calling s->get_mute() directly otherwise. Any detected change
 * is fed back through pa_sink_mute_changed(). The final return of
 * s->muted is past the sampled lines (not visible here). */
2441 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2443 pa_sink_assert_ref(s);
2444 pa_assert_ctl_context();
2445 pa_assert(PA_SINK_IS_LINKED(s->state));
2447 if ((s->refresh_muted || force_refresh) && s->get_mute) {
2450 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2451 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2452 pa_sink_mute_changed(s, mute);
2454 if (s->get_mute(s, &mute) >= 0)
2455 pa_sink_mute_changed(s, mute);
2462 /* Called from main thread */
/* Entry point for sink implementors to report an externally-changed
 * mute state. Ignored while pa_sink_set_mute() itself is driving the
 * hardware (set_mute_in_progress), then forwarded to
 * pa_sink_set_mute() with save=true since the change came from the user. */
2463 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2464 pa_sink_assert_ref(s);
2465 pa_assert_ctl_context();
2466 pa_assert(PA_SINK_IS_LINKED(s->state));
2468 if (s->set_mute_in_progress)
2471 /* pa_sink_set_mute() does this same check, so this may appear redundant,
2472 * but we must have this here also, because the save parameter of
2473 * pa_sink_set_mute() would otherwise have unintended side effects (saving
2474 * the mute state when it shouldn't be saved). */
2475 if (new_muted == s->muted)
2478 pa_sink_set_mute(s, new_muted, true);
2481 /* Called from main thread */
/* Merge property list 'p' into the sink's proplist using update mode
 * 'mode', then (if linked) fire the proplist-changed hook and post a
 * change event for subscribers. The return statement is past the
 * sampled lines (not visible here). */
2482 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2483 pa_sink_assert_ref(s);
2484 pa_assert_ctl_context();
2487 pa_proplist_update(s->proplist, mode, p);
2489 if (PA_SINK_IS_LINKED(s->state)) {
2490 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2491 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2497 /* Called from main thread */
2498 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Set (or clear, when description == NULL) the sink's human-readable
 * description property, keep the monitor source's description in sync,
 * and notify subscribers/hooks if the sink is linked. No-ops when the
 * description is unchanged. */
2499 void pa_sink_set_description(pa_sink *s, const char *description) {
2501 pa_sink_assert_ref(s);
2502 pa_assert_ctl_context();
2504 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2507 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2509 if (old && description && pa_streq(old, description))
/* Set or unset depending on 'description' (the selecting branch line
 * is not visible in this extraction). */
2513 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2515 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
/* Keep "Monitor Source of <name>" description in sync; falls back to
 * the sink's name when no description is given. */
2517 if (s->monitor_source) {
2520 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2521 pa_source_set_description(s->monitor_source, n);
2525 if (PA_SINK_IS_LINKED(s->state)) {
2526 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2527 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2531 /* Called from main thread */
/* Count everything connected to this sink: all sink inputs plus the
 * streams attached to its monitor source. The return statement is past
 * the sampled lines (not visible here). */
2532 unsigned pa_sink_linked_by(pa_sink *s) {
2535 pa_sink_assert_ref(s);
2536 pa_assert_ctl_context();
2537 pa_assert(PA_SINK_IS_LINKED(s->state));
2539 ret = pa_idxset_size(s->inputs);
2541 /* We add in the number of streams connected to us here. Please
2542 * note the asymmetry to pa_sink_used_by()! */
2544 if (s->monitor_source)
2545 ret += pa_source_linked_by(s->monitor_source);
2550 /* Called from main thread */
/* Count the sink inputs that are actively using this sink, i.e. the
 * total number of inputs minus the corked ones. Monitor-source streams
 * are deliberately excluded (contrast with pa_sink_linked_by()). */
2551 unsigned pa_sink_used_by(pa_sink *s) {
2554 pa_sink_assert_ref(s);
2555 pa_assert_ctl_context();
2556 pa_assert(PA_SINK_IS_LINKED(s->state));
2558 ret = pa_idxset_size(s->inputs);
2559 pa_assert(ret >= s->n_corked);
2561 /* Streams connected to our monitor source do not matter for
2562 * pa_sink_used_by()!.*/
2564 return ret - s->n_corked;
2567 /* Called from main thread */
/* Count the streams that inhibit auto-suspend of this sink: linked,
 * non-corked sink inputs without the DONT_INHIBIT_AUTO_SUSPEND flag,
 * plus whatever the monitor source reports. A result of 0 means the
 * sink may be suspended. The accumulation into 'ret' and the final
 * return are between/past the sampled lines (not visible here). */
2568 unsigned pa_sink_check_suspend(pa_sink *s) {
2573 pa_sink_assert_ref(s);
2574 pa_assert_ctl_context();
2576 if (!PA_SINK_IS_LINKED(s->state))
2581 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2582 pa_sink_input_state_t st;
2584 st = pa_sink_input_get_state(i);
2586 /* We do not assert here. It is perfectly valid for a sink input to
2587 * be in the INIT state (i.e. created, marked done but not yet put)
2588 * and we should not care if it's unlinked as it won't contribute
2589 * towards our busy status.
2591 if (!PA_SINK_INPUT_IS_LINKED(st))
2594 if (st == PA_SINK_INPUT_CORKED)
2597 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2603 if (s->monitor_source)
2604 ret += pa_source_check_suspend(s->monitor_source);
2609 /* Called from the IO thread */
/* Copy each sink input's main-thread soft_volume into its thread_info
 * copy and request a rewind so the new volume takes effect on already
 * rendered audio. Inputs whose volume is unchanged are skipped (the
 * 'continue' after the equality check is not visible here). */
2610 static void sync_input_volumes_within_thread(pa_sink *s) {
2614 pa_sink_assert_ref(s);
2615 pa_sink_assert_io_context(s);
2617 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2618 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2621 i->thread_info.soft_volume = i->soft_volume;
2622 pa_sink_input_request_rewind(i, 0, true, false, false);
2626 /* Called from the IO thread. Only called for the root sink in volume sharing
2627 * cases, except for internal recursive calls. */
/* Apply the shared volume on this sink via the in-thread
 * SET_VOLUME_SYNCED message, then recurse into every filter sink
 * (origin sink with SHARE_VOLUME_WITH_MASTER) attached to it. */
2628 static void set_shared_volume_within_thread(pa_sink *s) {
2629 pa_sink_input *i = NULL;
2632 pa_sink_assert_ref(s);
/* Direct process_msg() call: we're already in the IO thread, so no
 * queueing is needed. */
2634 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2636 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2637 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2638 set_shared_volume_within_thread(i->origin_sink);
2642 /* Called from IO thread, except when it is not */
/* Central message handler for the sink's msgobject: dispatches all
 * PA_SINK_MESSAGE_* codes (input add/remove, stream moves, volume/mute
 * sync, latency queries and settings, state changes, port switches).
 * Most cases run in the IO thread; UPDATE_VOLUME_AND_MUTE is explicitly
 * a main-thread message. NOTE(review): this extraction is lossy —
 * 'return 0;' statements, closing braces and some intermediate lines
 * between the numbered lines are not visible, so the exact fall-through
 * structure should be confirmed against the full file. */
2643 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2644 pa_sink *s = PA_SINK(o);
2645 pa_sink_assert_ref(s);
2647 switch ((pa_sink_message_t) code) {
/* --- A new sink input is being attached to this sink. --- */
2649 case PA_SINK_MESSAGE_ADD_INPUT: {
2650 pa_sink_input *i = PA_SINK_INPUT(userdata);
2652 /* If you change anything here, make sure to change the
2653 * sink input handling a few lines down at
2654 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2656 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2658 /* Since the caller sleeps in pa_sink_input_put(), we can
2659 * safely access data outside of thread_info even though
/* Mirror the main-thread sync_prev/sync_next links into thread_info
 * and wire up the neighbors' thread-side pointers. */
2662 if ((i->thread_info.sync_prev = i->sync_prev)) {
2663 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2664 pa_assert(i->sync_prev->sync_next == i);
2665 i->thread_info.sync_prev->thread_info.sync_next = i;
2668 if ((i->thread_info.sync_next = i->sync_next)) {
2669 pa_assert(i->sink == i->thread_info.sync_next->sink);
2670 pa_assert(i->sync_next->sync_prev == i);
2671 i->thread_info.sync_next->thread_info.sync_prev = i;
2674 pa_assert(!i->thread_info.attached);
2675 i->thread_info.attached = true;
2680 pa_sink_input_set_state_within_thread(i, i->state);
2682 /* The requested latency of the sink input needs to be fixed up and
2683 * then configured on the sink. If this causes the sink latency to
2684 * go down, the sink implementor is responsible for doing a rewind
2685 * in the update_requested_latency() callback to ensure that the
2686 * sink buffer doesn't contain more data than what the new latency
2689 * XXX: Does it really make sense to push this responsibility to
2690 * the sink implementors? Wouldn't it be better to do it once in
2691 * the core than many times in the modules? */
2693 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2694 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2696 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2697 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2699 /* We don't rewind here automatically. This is left to the
2700 * sink input implementor because some sink inputs need a
2701 * slow start, i.e. need some time to buffer client
2702 * samples before beginning streaming.
2704 * XXX: Does it really make sense to push this functionality to
2705 * the sink implementors? Wouldn't it be better to do it once in
2706 * the core than many times in the modules? */
2708 /* In flat volume mode we need to update the volume as
/* Tail-dispatch into SET_SHARED_VOLUME to recompute volumes. */
2710 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
/* --- A sink input is being detached from this sink. --- */
2713 case PA_SINK_MESSAGE_REMOVE_INPUT: {
2714 pa_sink_input *i = PA_SINK_INPUT(userdata);
2716 /* If you change anything here, make sure to change the
2717 * sink input handling a few lines down at
2718 * PA_SINK_MESSAGE_START_MOVE, too. */
2723 pa_sink_input_set_state_within_thread(i, i->state);
2725 pa_assert(i->thread_info.attached);
2726 i->thread_info.attached = false;
2728 /* Since the caller sleeps in pa_sink_input_unlink(),
2729 * we can safely access data outside of thread_info even
2730 * though it is mutable */
2732 pa_assert(!i->sync_prev);
2733 pa_assert(!i->sync_next);
/* Unlink this input from the thread-side sync chain. */
2735 if (i->thread_info.sync_prev) {
2736 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2737 i->thread_info.sync_prev = NULL;
2740 if (i->thread_info.sync_next) {
2741 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2742 i->thread_info.sync_next = NULL;
2745 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
2746 pa_sink_input_unref(i);
2748 pa_sink_invalidate_requested_latency(s, true);
2749 pa_sink_request_rewind(s, (size_t) -1);
2751 /* In flat volume mode we need to update the volume as
2753 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
/* --- First half of moving a stream away from this sink. --- */
2756 case PA_SINK_MESSAGE_START_MOVE: {
2757 pa_sink_input *i = PA_SINK_INPUT(userdata);
2759 /* We don't support moving synchronized streams. */
2760 pa_assert(!i->sync_prev);
2761 pa_assert(!i->sync_next);
2762 pa_assert(!i->thread_info.sync_next);
2763 pa_assert(!i->thread_info.sync_prev);
2765 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2767 size_t sink_nbytes, total_nbytes;
2769 /* The old sink probably has some audio from this
2770 * stream in its buffer. We want to "take it back" as
2771 * much as possible and play it to the new sink. We
2772 * don't know at this point how much the old sink can
2773 * rewind. We have to pick something, and that
2774 * something is the full latency of the old sink here.
2775 * So we rewind the stream buffer by the sink latency
2776 * amount, which may be more than what we should
2777 * rewind. This can result in a chunk of audio being
2778 * played both to the old sink and the new sink.
2780 * FIXME: Fix this code so that we don't have to make
2781 * guesses about how much the sink will actually be
2782 * able to rewind. If someone comes up with a solution
2783 * for this, something to note is that the part of the
2784 * latency that the old sink couldn't rewind should
2785 * ideally be compensated after the stream has moved
2786 * to the new sink by adding silence. The new sink
2787 * most likely can't start playing the moved stream
2788 * immediately, and that gap should be removed from
2789 * the "compensation silence" (at least at the time of
2790 * writing this, the move finish code will actually
2791 * already take care of dropping the new sink's
2792 * unrewindable latency, so taking into account the
2793 * unrewindable latency of the old sink is the only
2796 * The render_memblockq contents are discarded,
2797 * because when the sink changes, the format of the
2798 * audio stored in the render_memblockq may change
2799 * too, making the stored audio invalid. FIXME:
2800 * However, the read and write indices are moved back
2801 * the same amount, so if they are not the same now,
2802 * they won't be the same after the rewind either. If
2803 * the write index of the render_memblockq is ahead of
2804 * the read index, then the render_memblockq will feed
2805 * the new sink some silence first, which it shouldn't
2806 * do. The write index should be flushed to be the
2807 * same as the read index. */
2809 /* Get the latency of the sink */
2810 usec = pa_sink_get_latency_within_thread(s);
2811 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2812 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
2814 if (total_nbytes > 0) {
/* Convert to pre-resampler bytes if a resampler is in use. */
2815 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
2816 i->thread_info.rewrite_flush = true;
2817 pa_sink_input_process_rewind(i, sink_nbytes);
2824 pa_assert(i->thread_info.attached);
2825 i->thread_info.attached = false;
2827 /* Let's remove the sink input ...*/
2828 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
2829 pa_sink_input_unref(i);
2831 pa_sink_invalidate_requested_latency(s, true);
2833 pa_log_debug("Requesting rewind due to started move");
2834 pa_sink_request_rewind(s, (size_t) -1);
2836 /* In flat volume mode we need to update the volume as
2838 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
/* --- Second half of a stream move: attach to the new sink. --- */
2841 case PA_SINK_MESSAGE_FINISH_MOVE: {
2842 pa_sink_input *i = PA_SINK_INPUT(userdata);
2844 /* We don't support moving synchronized streams. */
2845 pa_assert(!i->sync_prev);
2846 pa_assert(!i->sync_next);
2847 pa_assert(!i->thread_info.sync_next);
2848 pa_assert(!i->thread_info.sync_prev);
2850 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2852 pa_assert(!i->thread_info.attached);
2853 i->thread_info.attached = true;
2858 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2862 /* In the ideal case the new sink would start playing
2863 * the stream immediately. That requires the sink to
2864 * be able to rewind all of its latency, which usually
2865 * isn't possible, so there will probably be some gap
2866 * before the moved stream becomes audible. We then
2867 * have two possibilities: 1) start playing the stream
2868 * from where it is now, or 2) drop the unrewindable
2869 * latency of the sink from the stream. With option 1
2870 * we won't lose any audio but the stream will have a
2871 * pause. With option 2 we may lose some audio but the
2872 * stream time will be somewhat in sync with the wall
2873 * clock. Lennart seems to have chosen option 2 (one
2874 * of the reasons might have been that option 1 is
2875 * actually much harder to implement), so we drop the
2876 * latency of the new sink from the moved stream and
2877 * hope that the sink will undo most of that in the
2880 /* Get the latency of the sink */
2881 usec = pa_sink_get_latency_within_thread(s);
2882 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2885 pa_sink_input_drop(i, nbytes);
2887 pa_log_debug("Requesting rewind due to finished move");
2888 pa_sink_request_rewind(s, nbytes);
2891 /* Updating the requested sink latency has to be done
2892 * after the sink rewind request, not before, because
2893 * otherwise the sink may limit the rewind amount
2896 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2897 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2899 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2900 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2902 return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
/* --- Volume sharing: delegate to the root of the sharing tree. --- */
2905 case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
2906 pa_sink *root_sink = pa_sink_get_master(s);
2908 if (PA_LIKELY(root_sink))
2909 set_shared_volume_within_thread(root_sink);
/* For deferred-volume sinks, queue the HW volume write first, then
 * fall through to the common soft-volume handling. */
2914 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2916 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2918 pa_sink_volume_change_push(s);
2920 /* Fall through ... */
2922 case PA_SINK_MESSAGE_SET_VOLUME:
2924 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2925 s->thread_info.soft_volume = s->soft_volume;
2926 pa_sink_request_rewind(s, (size_t) -1);
2928 /* Fall through ... */
2930 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2931 sync_input_volumes_within_thread(s);
/* Start (or restart) a volume ramp in the IO thread. */
2934 case PA_SINK_MESSAGE_SET_VOLUME_RAMP:
2935 /* if we have ongoing ramp where we take current start values */
2936 pa_cvolume_ramp_start_from(&s->thread_info.ramp, &s->ramp);
2937 s->thread_info.ramp = s->ramp;
2938 pa_sink_request_rewind(s, (size_t) -1);
/* Read back the HW volume for deferred-volume sinks (flush pending
 * changes first so the readback reflects reality). */
2941 case PA_SINK_MESSAGE_GET_VOLUME:
2943 if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
2945 pa_sink_volume_change_flush(s);
2946 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2949 /* In case sink implementor reset SW volume. */
2950 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2951 s->thread_info.soft_volume = s->soft_volume;
2952 pa_sink_request_rewind(s, (size_t) -1);
/* Apply mute state in the IO thread; rewind so it is audible ASAP. */
2957 case PA_SINK_MESSAGE_SET_MUTE:
2959 if (s->thread_info.soft_muted != s->muted) {
2960 s->thread_info.soft_muted = s->muted;
2961 pa_sink_request_rewind(s, (size_t) -1);
2964 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2969 case PA_SINK_MESSAGE_GET_MUTE:
2971 if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2972 return s->get_mute(s, userdata);
/* --- Sink state transition (userdata carries the new state). --- */
2976 case PA_SINK_MESSAGE_SET_STATE: {
/* True when crossing the suspended/opened boundary in either
 * direction; used to notify inputs below. */
2978 bool suspend_change =
2979 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2980 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2982 s->thread_info.state = PA_PTR_TO_UINT(userdata);
/* A suspended sink must not keep a pending rewind around. */
2984 if (s->thread_info.state == PA_SINK_SUSPENDED) {
2985 s->thread_info.rewind_nbytes = 0;
2986 s->thread_info.rewind_requested = false;
2989 if (suspend_change) {
2993 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2994 if (i->suspend_within_thread)
2995 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
3001 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
3003 pa_usec_t *usec = userdata;
3004 *usec = pa_sink_get_requested_latency_within_thread(s);
3006 /* Yes, that's right, the IO thread will see -1 when no
3007 * explicit requested latency is configured, the main
3008 * thread will see max_latency */
3009 if (*usec == (pa_usec_t) -1)
3010 *usec = s->thread_info.max_latency;
/* Latency range setter/getter: userdata is a pa_usec_t[2]. */
3015 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
3016 pa_usec_t *r = userdata;
3018 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
3023 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
3024 pa_usec_t *r = userdata;
3026 r[0] = s->thread_info.min_latency;
3027 r[1] = s->thread_info.max_latency;
3032 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
3034 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
3037 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
3039 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
3042 case PA_SINK_MESSAGE_GET_MAX_REWIND:
3044 *((size_t*) userdata) = s->thread_info.max_rewind;
3047 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
3049 *((size_t*) userdata) = s->thread_info.max_request;
3052 case PA_SINK_MESSAGE_SET_MAX_REWIND:
3054 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
3057 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
3059 pa_sink_set_max_request_within_thread(s, (size_t) offset);
/* Port switch: result is reported back through msg_data->ret. */
3062 case PA_SINK_MESSAGE_SET_PORT:
3064 pa_assert(userdata);
3066 struct sink_message_set_port *msg_data = userdata;
3067 msg_data->ret = s->set_port(s, msg_data->port);
3071 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
3072 /* This message is sent from IO-thread and handled in main thread. */
3073 pa_assert_ctl_context();
3075 /* Make sure we're not messing with main thread when no longer linked */
3076 if (!PA_SINK_IS_LINKED(s->state))
3079 pa_sink_get_volume(s, true);
3080 pa_sink_get_mute(s, true);
3083 case PA_SINK_MESSAGE_SET_LATENCY_OFFSET:
3084 s->thread_info.latency_offset = offset;
/* GET_LATENCY and MAX are left for subclasses / are invalid here. */
3087 case PA_SINK_MESSAGE_GET_LATENCY:
3088 case PA_SINK_MESSAGE_MAX:
3095 /* Called from main thread */
/* Suspend or resume every sink in the core for the given cause,
 * remembering the first error. The 'ret' bookkeeping and final return
 * are between/past the sampled lines (not visible here). */
3096 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
3101 pa_core_assert_ref(c);
3102 pa_assert_ctl_context();
3103 pa_assert(cause != 0);
3105 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
3108 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
3115 /* Called from IO thread */
/* Detach every sink input from the IO thread (each input's detach hook
 * is invoked inside the loop — the call line is not visible here) and
 * then detach the monitor source as well. */
3116 void pa_sink_detach_within_thread(pa_sink *s) {
3120 pa_sink_assert_ref(s);
3121 pa_sink_assert_io_context(s);
3122 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3124 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3128 if (s->monitor_source)
3129 pa_source_detach_within_thread(s->monitor_source);
3132 /* Called from IO thread */
/* Counterpart of pa_sink_detach_within_thread(): re-attach every sink
 * input (attach call inside the loop not visible here) and the monitor
 * source to the IO thread. */
3133 void pa_sink_attach_within_thread(pa_sink *s) {
3137 pa_sink_assert_ref(s);
3138 pa_sink_assert_io_context(s);
3139 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3141 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3145 if (s->monitor_source)
3146 pa_source_attach_within_thread(s->monitor_source);
3149 /* Called from IO thread */
/* Request that the sink rewind its playback buffer by up to 'nbytes'
 * bytes ((size_t)-1 means "as much as possible"). The amount is clamped
 * to max_rewind; if a larger-or-equal rewind is already pending, the
 * request is a no-op. Finally the implementor's request_rewind callback
 * is invoked if provided. */
3150 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
3151 pa_sink_assert_ref(s);
3152 pa_sink_assert_io_context(s);
3153 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3155 if (nbytes == (size_t) -1)
3156 nbytes = s->thread_info.max_rewind;
3158 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
/* An equal or larger rewind is already queued — nothing to do. */
3160 if (s->thread_info.rewind_requested &&
3161 nbytes <= s->thread_info.rewind_nbytes)
3164 s->thread_info.rewind_nbytes = nbytes;
3165 s->thread_info.rewind_requested = true;
3167 if (s->request_rewind)
3168 s->request_rewind(s);
3171 /* Called from IO thread */
/* Compute the effective requested latency: the minimum of all sink
 * inputs' requested latencies and the monitor source's, clamped to the
 * sink's [min_latency, max_latency] range. Fixed-latency sinks return
 * their clamped fixed latency. Returns (pa_usec_t)-1 when nothing has
 * requested a latency. The result is cached once the sink is linked. */
3172 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
3173 pa_usec_t result = (pa_usec_t) -1;
3176 pa_usec_t monitor_latency;
3178 pa_sink_assert_ref(s);
3179 pa_sink_assert_io_context(s);
3181 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3182 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
/* Serve from the cache if it hasn't been invalidated. */
3184 if (s->thread_info.requested_latency_valid)
3185 return s->thread_info.requested_latency;
/* Take the smallest latency any attached input asked for. */
3187 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3188 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3189 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3190 result = i->thread_info.requested_sink_latency;
3192 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3194 if (monitor_latency != (pa_usec_t) -1 &&
3195 (result == (pa_usec_t) -1 || result > monitor_latency))
3196 result = monitor_latency;
3198 if (result != (pa_usec_t) -1)
3199 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3201 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3202 /* Only cache if properly initialized */
3203 s->thread_info.requested_latency = result;
3204 s->thread_info.requested_latency_valid = true;
3210 /* Called from main thread */
/* Main-thread accessor for the requested latency: queries the IO thread
 * synchronously via GET_REQUESTED_LATENCY. A suspended sink is handled
 * by the early branch (its return value line is not visible here), and
 * the final return of 'usec' is past the sampled lines. */
3211 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3214 pa_sink_assert_ref(s);
3215 pa_assert_ctl_context();
3216 pa_assert(PA_SINK_IS_LINKED(s->state));
3218 if (s->state == PA_SINK_SUSPENDED)
3221 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3226 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Update the sink's maximum rewind amount and fan the new value out to
 * every attached sink input (if linked) and to the monitor source.
 * No-op when the value is unchanged. */
3227 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3231 pa_sink_assert_ref(s);
3232 pa_sink_assert_io_context(s);
3234 if (max_rewind == s->thread_info.max_rewind)
3237 s->thread_info.max_rewind = max_rewind;
3239 if (PA_SINK_IS_LINKED(s->thread_info.state))
3240 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3241 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3243 if (s->monitor_source)
3244 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3247 /* Called from main thread */
/* Main-thread setter for max_rewind: routed through the IO thread via
 * SET_MAX_REWIND when the sink is linked, applied directly (the
 * within-thread variant is safe pre-startup) otherwise. */
3248 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3249 pa_sink_assert_ref(s);
3250 pa_assert_ctl_context();
3252 if (PA_SINK_IS_LINKED(s->state))
3253 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3255 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3258 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Update the sink's maximum request size and propagate it to all
 * attached sink inputs when the sink is linked. No-op when unchanged. */
3259 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3262 pa_sink_assert_ref(s);
3263 pa_sink_assert_io_context(s);
3265 if (max_request == s->thread_info.max_request)
3268 s->thread_info.max_request = max_request;
3270 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3273 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3274 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3278 /* Called from main thread */
/* Main-thread setter for max_request: routed through the IO thread via
 * SET_MAX_REQUEST when linked, applied directly otherwise. */
3279 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3280 pa_sink_assert_ref(s);
3281 pa_assert_ctl_context();
3283 if (PA_SINK_IS_LINKED(s->state))
3284 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3286 pa_sink_set_max_request_within_thread(s, max_request);
3289 /* Called from IO thread */
/* Drop the cached requested-latency value (dynamic-latency sinks only)
 * and, when linked, notify the sink implementor and every sink input
 * that the requested latency may have changed. The 'dynamic' parameter
 * presumably gates an early exit between the sampled lines — TODO
 * confirm against the full file. */
3290 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
3294 pa_sink_assert_ref(s);
3295 pa_sink_assert_io_context(s);
3297 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3298 s->thread_info.requested_latency_valid = false;
3302 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3304 if (s->update_requested_latency)
3305 s->update_requested_latency(s);
3307 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3308 if (i->update_sink_requested_latency)
3309 i->update_sink_requested_latency(i);
3313 /* Called from main thread */
/* Set the sink's dynamic latency range. Zero/out-of-range inputs are
 * normalized to the ABSOLUTE_{MIN,MAX}_LATENCY bounds. Delivered to the
 * IO thread via SET_LATENCY_RANGE when linked, applied directly
 * otherwise. */
3314 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3315 pa_sink_assert_ref(s);
3316 pa_assert_ctl_context();
3318 /* min_latency == 0: no limit
3319 * min_latency anything else: specified limit
3321 * Similar for max_latency */
3323 if (min_latency < ABSOLUTE_MIN_LATENCY)
3324 min_latency = ABSOLUTE_MIN_LATENCY;
3326 if (max_latency <= 0 ||
3327 max_latency > ABSOLUTE_MAX_LATENCY)
3328 max_latency = ABSOLUTE_MAX_LATENCY;
3330 pa_assert(min_latency <= max_latency);
3332 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3333 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3334 max_latency == ABSOLUTE_MAX_LATENCY) ||
3335 (s->flags & PA_SINK_DYNAMIC_LATENCY));
/* 'r' is presumably a pa_usec_t[2] filled with the two values — its
 * declaration/initialization lines are not visible here. */
3337 if (PA_SINK_IS_LINKED(s->state)) {
3343 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3345 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3348 /* Called from main thread */
/* Read the sink's latency range into *min_latency / *max_latency: via a
 * synchronous GET_LATENCY_RANGE message when linked, straight from
 * thread_info otherwise (safe because the IO thread isn't running yet). */
3349 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3350 pa_sink_assert_ref(s);
3351 pa_assert_ctl_context();
3352 pa_assert(min_latency);
3353 pa_assert(max_latency);
3355 if (PA_SINK_IS_LINKED(s->state)) {
3356 pa_usec_t r[2] = { 0, 0 };
3358 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3360 *min_latency = r[0];
3361 *max_latency = r[1];
3363 *min_latency = s->thread_info.min_latency;
3364 *max_latency = s->thread_info.max_latency;
3368 /* Called from IO thread */
/* IO-thread side of pa_sink_set_latency_range(): store the new bounds,
 * notify attached inputs, invalidate the cached requested latency, and
 * mirror the range onto the monitor source. No-op when unchanged. */
3369 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3370 pa_sink_assert_ref(s);
3371 pa_sink_assert_io_context(s);
3373 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3374 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3375 pa_assert(min_latency <= max_latency);
3377 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3378 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3379 max_latency == ABSOLUTE_MAX_LATENCY) ||
3380 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3382 if (s->thread_info.min_latency == min_latency &&
3383 s->thread_info.max_latency == max_latency)
3386 s->thread_info.min_latency = min_latency;
3387 s->thread_info.max_latency = max_latency;
3389 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3393 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3394 if (i->update_sink_latency_range)
3395 i->update_sink_latency_range(i);
3398 pa_sink_invalidate_requested_latency(s, false);
3400 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3403 /* Called from main thread */
/* Set a fixed latency for a non-dynamic-latency sink (dynamic sinks
 * must pass 0; the early return after the assert is not visible here).
 * The value is clamped to the absolute bounds, pushed to the IO thread
 * via SET_FIXED_LATENCY when linked, and mirrored onto the monitor
 * source. */
3404 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3405 pa_sink_assert_ref(s);
3406 pa_assert_ctl_context();
3408 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3409 pa_assert(latency == 0);
3413 if (latency < ABSOLUTE_MIN_LATENCY)
3414 latency = ABSOLUTE_MIN_LATENCY;
3416 if (latency > ABSOLUTE_MAX_LATENCY)
3417 latency = ABSOLUTE_MAX_LATENCY;
3419 if (PA_SINK_IS_LINKED(s->state))
3420 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3422 s->thread_info.fixed_latency = latency;
3424 pa_source_set_fixed_latency(s->monitor_source, latency);
3427 /* Called from main thread */
/* Return the sink's fixed latency (0 for dynamic-latency sinks — the
 * early return after the flag check, and the final return of 'latency',
 * are not visible here). Queries the IO thread when linked, otherwise
 * reads thread_info directly. */
3428 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3431 pa_sink_assert_ref(s);
3432 pa_assert_ctl_context();
3434 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3437 if (PA_SINK_IS_LINKED(s->state))
3438 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3440 latency = s->thread_info.fixed_latency;
3445 /* Called from IO thread */
/* IO-thread counterpart of pa_sink_set_fixed_latency(): update
 * thread_info.fixed_latency, notify every attached sink input, invalidate
 * the currently requested latency and keep the monitor source in sync.
 * Dynamic-latency sinks must pass 0. */
3446 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3447 pa_sink_assert_ref(s);
3448 pa_sink_assert_io_context(s);
3450 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
/* Fixed latency is meaningless when latency is dynamic */
3451 pa_assert(latency == 0);
3452 s->thread_info.fixed_latency = 0;
3454 if (s->monitor_source)
3455 pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
/* Callers are expected to have clamped the value already */
3460 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3461 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
/* No change -> nothing to do (early return elided in this excerpt) */
3463 if (s->thread_info.fixed_latency == latency)
3466 s->thread_info.fixed_latency = latency;
3468 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
/* Let every attached input react to the new fixed latency */
3472 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3473 if (i->update_sink_fixed_latency)
3474 i->update_sink_fixed_latency(i);
3477 pa_sink_invalidate_requested_latency(s, false);
3479 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3482 /* Called from main context */
/* Set the sink's latency offset (int64_t: may be negative). Linked sinks
 * forward the value to the IO thread via a synchronous message; unlinked
 * sinks write thread_info directly. */
3483 void pa_sink_set_latency_offset(pa_sink *s, int64_t offset) {
3484 pa_sink_assert_ref(s);
3486 s->latency_offset = offset;
3488 if (PA_SINK_IS_LINKED(s->state))
3489 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3491 s->thread_info.latency_offset = offset;
3494 /* Called from main context */
/* Query the sink's maximum rewind size (bytes). Unlinked sinks are read
 * directly (no IO thread to race with); linked sinks are queried via a
 * synchronous message (result returned through the elided 'r' local). */
3495 size_t pa_sink_get_max_rewind(pa_sink *s) {
3497 pa_assert_ctl_context();
3498 pa_sink_assert_ref(s);
3500 if (!PA_SINK_IS_LINKED(s->state))
3501 return s->thread_info.max_rewind;
3503 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3508 /* Called from main context */
/* Query the sink's maximum request size (bytes). Same pattern as
 * pa_sink_get_max_rewind(): direct read when unlinked, synchronous
 * IO-thread message when linked. */
3509 size_t pa_sink_get_max_request(pa_sink *s) {
3511 pa_sink_assert_ref(s);
3512 pa_assert_ctl_context();
3514 if (!PA_SINK_IS_LINKED(s->state))
3515 return s->thread_info.max_request;
3517 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3522 /* Called from main context */
3523 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3524 pa_device_port *port;
3525 pa_device_port *old_port;
3528 pa_sink_assert_ref(s);
3529 pa_assert_ctl_context();
3532 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3533 return -PA_ERR_NOTIMPLEMENTED;
3537 return -PA_ERR_NOENTITY;
3539 if (!(port = pa_hashmap_get(s->ports, name)))
3540 return -PA_ERR_NOENTITY;
3542 old_port = s->active_port;
3544 if (port == old_port) {
3545 s->save_port = s->save_port || save;
3549 pa_device_port_active_changed(old_port, false);
3551 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
3552 struct sink_message_set_port msg = { .port = port, .ret = 0 };
3553 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
3557 ret = s->set_port(s, port);
3560 pa_log("Failed to set the port of sink %s from %s to %s.", s->name, old_port->name, port->name);
3562 /* We don't know the real state of the device, but let's assume that
3563 * the old port is still active, because s->active_port is left to
3564 * point to the old port anyway. */
3565 pa_device_port_active_changed(old_port, true);
3570 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3572 pa_log_info("Changed port of sink %u \"%s\" from %s to %s", s->index, s->name, old_port->name, port->name);
3574 s->active_port = port;
3575 s->save_port = save;
3577 pa_sink_set_latency_offset(s, s->active_port->latency_offset);
3578 pa_device_port_active_changed(port, true);
3580 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
/* Fill in PA_PROP_DEVICE_ICON_NAME on the proplist if it is not set yet.
 * The base icon 't' is chosen from the device form factor, then class;
 * 'is_sink' selects between output and input fallbacks. A profile-derived
 * suffix and the bus name are appended to form e.g. "audio-card-pci".
 * Returns early (elided line) when an icon name already exists. */
3585 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3586 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3590 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
/* First preference: derive the icon from the form factor */
3593 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3595 if (pa_streq(ff, "microphone"))
3596 t = "audio-input-microphone";
3597 else if (pa_streq(ff, "webcam"))
3599 else if (pa_streq(ff, "computer"))
3601 else if (pa_streq(ff, "handset"))
3603 else if (pa_streq(ff, "portable"))
3604 t = "multimedia-player";
3605 else if (pa_streq(ff, "tv"))
3606 t = "video-display";
3609 * The following icons are not part of the icon naming spec,
3610 * because Rodney Dawes sucks as the maintainer of that spec.
3612 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3614 else if (pa_streq(ff, "headset"))
3615 t = "audio-headset";
3616 else if (pa_streq(ff, "headphone"))
3617 t = "audio-headphones";
3618 else if (pa_streq(ff, "speaker"))
3619 t = "audio-speakers";
3620 else if (pa_streq(ff, "hands-free"))
3621 t = "audio-handsfree";
/* Second preference: the device class (e.g. modems) */
3625 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3626 if (pa_streq(c, "modem"))
/* Fallback for sources when nothing matched (sink fallback elided) */
3633 t = "audio-input-microphone";
/* Profile name contributes a suffix (analog/iec958/hdmi variants) */
3636 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3637 if (strstr(profile, "analog"))
3639 else if (strstr(profile, "iec958"))
3641 else if (strstr(profile, "hdmi"))
3645 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
/* Compose "<base><profile-suffix>[-<bus>]" into the icon property */
3647 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
/* Fill in PA_PROP_DEVICE_DESCRIPTION if not already set. Tries, in order:
 * a localized name for internal devices, a class-based name (modem),
 * then the raw product name. If a profile description 'k' is also present
 * it is appended ("<description> <profile>"); otherwise the bare
 * description is stored. Early returns are elided in this excerpt. */
3652 bool pa_device_init_description(pa_proplist *p) {
3653 const char *s, *d = NULL, *k;
3656 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3659 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3660 if (pa_streq(s, "internal"))
/* _() marks the string for gettext translation */
3661 d = _("Built-in Audio");
3664 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3665 if (pa_streq(s, "modem"))
/* Last resort: use the product name verbatim */
3669 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3674 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3677 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3679 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
/* Fill in PA_PROP_DEVICE_INTENDED_ROLES if not already set: telephony form
 * factors (handset, hands-free, headset) are tagged with the "phone" role. */
3684 bool pa_device_init_intended_roles(pa_proplist *p) {
3688 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3691 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3692 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3693 || pa_streq(s, "headset")) {
3694 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
/* Compute a heuristic priority for a device from its proplist: device
 * class, form factor, bus and profile name each contribute a weight
 * (the actual increments are elided in this excerpt). Higher values mean
 * the device is a better default candidate. */
3701 unsigned pa_device_init_priority(pa_proplist *p) {
3703 unsigned priority = 0;
3707 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3709 if (pa_streq(s, "sound"))
3711 else if (!pa_streq(s, "modem"))
3715 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3717 if (pa_streq(s, "internal"))
3719 else if (pa_streq(s, "speaker"))
3721 else if (pa_streq(s, "headphone"))
3725 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3727 if (pa_streq(s, "pci"))
3729 else if (pa_streq(s, "usb"))
3731 else if (pa_streq(s, "bluetooth"))
3735 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
/* Analog profiles rank above iec958/digital ones */
3737 if (pa_startswith(s, "analog-"))
3739 else if (pa_startswith(s, "iec958-"))
/* Lock-free free-list used to recycle pa_sink_volume_change structs between
 * IO-thread pushes, avoiding malloc/free churn on the fast path. */
3746 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3748 /* Called from the IO thread. */
/* Allocate a volume-change record, reusing one from the static free-list
 * when available. List links are reset and hw_volume is initialized to
 * PA_VOLUME_NORM for all of the sink's channels. */
3749 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3750 pa_sink_volume_change *c;
3751 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3752 c = pa_xnew(pa_sink_volume_change, 1);
3754 PA_LLIST_INIT(pa_sink_volume_change, c);
3756 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3760 /* Called from the IO thread. */
/* Return a volume-change record to the static free-list; when the list is
 * full (push returns < 0) the record is freed outright (the pa_xfree call
 * is elided in this excerpt). */
3761 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3763 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3767 /* Called from the IO thread. */
/* Queue a hardware volume change so that it takes effect roughly when the
 * audio currently in flight reaches the speakers. The target HW volume is
 * real_volume with soft_volume divided out. The event is timestamped
 * "now + sink latency + extra delay", then nudged by safety_margin:
 * increases are applied slightly late, decreases slightly early, so audio
 * is never played louder than intended. The new event is inserted into the
 * time-ordered change list and any queued events that would fire after it
 * are dropped as superseded.
 * NOTE(review): several closing braces / else branches and the 'direction'
 * assignments are elided in this excerpt -- confirm against full source. */
3768 void pa_sink_volume_change_push(pa_sink *s) {
3769 pa_sink_volume_change *c = NULL;
3770 pa_sink_volume_change *nc = NULL;
3771 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3773 const char *direction = NULL;
3776 nc = pa_sink_volume_change_new(s);
3778 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3779 * Adding one more volume for HW would get us rid of this, but I am trying
3780 * to survive with the ones we already have. */
3781 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
/* Nothing queued and HW volume already matches: drop the no-op event */
3783 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3784 pa_log_debug("Volume not changing");
3785 pa_sink_volume_change_free(nc);
/* Schedule for when currently-buffered audio is actually audible */
3789 nc->at = pa_sink_get_latency_within_thread(s);
3790 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
/* Walk the queue tail-to-head to find the insertion point */
3792 if (s->thread_info.volume_changes_tail) {
3793 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3794 /* If volume is going up let's do it a bit late. If it is going
3795 * down let's do it a bit early. */
3796 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3797 if (nc->at + safety_margin > c->at) {
3798 nc->at += safety_margin;
3803 else if (nc->at - safety_margin > c->at) {
3804 nc->at -= safety_margin;
/* Empty queue: compare against the current HW volume instead */
3812 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3813 nc->at += safety_margin;
3816 nc->at -= safety_margin;
3819 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3822 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3825 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3827 /* We can ignore volume events that came earlier but should happen later than this. */
3828 PA_LLIST_FOREACH(c, nc->next) {
3829 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3830 pa_sink_volume_change_free(c);
/* New event is now the last one in the queue */
3833 s->thread_info.volume_changes_tail = nc;
3836 /* Called from the IO thread. */
/* Drop every queued (not yet applied) hardware volume change: detach the
 * whole list, then free each entry. */
3837 static void pa_sink_volume_change_flush(pa_sink *s) {
3838 pa_sink_volume_change *c = s->thread_info.volume_changes;
3840 s->thread_info.volume_changes = NULL;
3841 s->thread_info.volume_changes_tail = NULL;
/* Walk the detached list; 'next' is saved before freeing each node */
3843 pa_sink_volume_change *next = c->next;
3844 pa_sink_volume_change_free(c);
3849 /* Called from the IO thread. */
/* Apply every queued volume change whose due time has passed, updating
 * current_hw_volume (the write to hardware -- presumably via
 * s->write_volume(s) -- is elided in this excerpt; confirm). If changes
 * remain queued, *usec_to_next (when non-NULL) receives the delay until
 * the next one is due. Return statements are elided; per the signature the
 * function reports whether further changes are pending. */
3850 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
/* Nothing queued or sink not linked: nothing to apply */
3856 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
/* Deferred volume requires the sink to provide a write_volume callback */
3862 pa_assert(s->write_volume);
3864 now = pa_rtclock_now();
/* Pop and apply every change that is already due */
3866 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3867 pa_sink_volume_change *c = s->thread_info.volume_changes;
3868 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3869 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3870 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3872 s->thread_info.current_hw_volume = c->hw_volume;
3873 pa_sink_volume_change_free(c);
3879 if (s->thread_info.volume_changes) {
3881 *usec_to_next = s->thread_info.volume_changes->at - now;
/* Rate-limited so frequent IO-thread wakeups don't spam the log */
3882 if (pa_log_ratelimit(PA_LOG_DEBUG))
3883 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
/* Queue drained: clear the tail pointer too */
3888 s->thread_info.volume_changes_tail = NULL;
3893 /* Called from the IO thread. */
3894 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3895 /* All the queued volume events later than current latency are shifted to happen earlier. */
/* After a rewind of 'nbytes', audio scheduled for later is played sooner,
 * so queued volume events past the current latency horizon are pulled
 * forward to that horizon (adjusted by the safety margin in the same
 * up-late/down-early fashion as in pa_sink_volume_change_push), then any
 * now-due events are applied immediately. NOTE(review): 'rewound' is
 * computed but its use is elided in this excerpt -- confirm. */
3896 pa_sink_volume_change *c;
3897 pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3898 pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3899 pa_usec_t limit = pa_sink_get_latency_within_thread(s);
3901 pa_log_debug("latency = %lld", (long long) limit);
3902 limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3904 PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3905 pa_usec_t modified_limit = limit;
/* Going down: allow it a bit early; going up: push it a bit late */
3906 if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3907 modified_limit -= s->thread_info.volume_change_safety_margin;
3909 modified_limit += s->thread_info.volume_change_safety_margin;
3910 if (c->at > modified_limit) {
3912 if (c->at < modified_limit)
3913 c->at = modified_limit;
3915 prev_vol = pa_cvolume_avg(&c->hw_volume);
/* Apply whatever became due as a result of the shift */
3917 pa_sink_volume_change_apply(s, NULL);
3920 /* Called from the main thread */
3921 /* Gets the list of formats supported by the sink. The members and idxset must
3922 * be freed by the caller. */
3923 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3928 if (s->get_formats) {
3929 /* Sink supports format query, all is good */
3930 ret = s->get_formats(s);
3932 /* Sink doesn't support format query, so assume it does PCM */
3933 pa_format_info *f = pa_format_info_new();
3934 f->encoding = PA_ENCODING_PCM;
/* Single-element set owning the freshly created format info */
3936 ret = pa_idxset_new(NULL, NULL);
3937 pa_idxset_put(ret, f, NULL);
3943 /* Called from the main thread */
3944 /* Allows an external source to set what formats a sink supports if the sink
3945 * permits this. The function makes a copy of the formats on success. */
/* Returns the sink callback's result when supported; the unsupported path
 * (elided) bails out without touching the sink. */
3946 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3951 /* Sink supports setting formats -- let's give it a shot */
3952 return s->set_formats(s, formats);
3954 /* Sink doesn't support setting this -- bail out */
3958 /* Called from the main thread */
3959 /* Checks if the sink can accept this format */
/* Fetches the sink's supported formats (owned locally, freed before
 * returning) and scans them for one compatible with 'f'. */
3960 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3961 pa_idxset *formats = NULL;
3967 formats = pa_sink_get_formats(s);
3970 pa_format_info *finfo_device;
3973 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3974 if (pa_format_info_is_compatible(finfo_device, f)) {
/* This function owns 'formats' (see pa_sink_get_formats): free members too */
3980 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3986 /* Called from the main thread */
3987 /* Calculates the intersection between formats supported by the sink and
3988 * in_formats, and returns these, in the order of the sink's formats. */
/* Caller owns the returned idxset; entries are copies of in_formats
 * members. O(sink_formats * in_formats) pairwise comparison. */
3989 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3990 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3991 pa_format_info *f_sink, *f_in;
/* Nothing to intersect with -> skip straight to cleanup (goto elided) */
3996 if (!in_formats || pa_idxset_isempty(in_formats))
3999 sink_formats = pa_sink_get_formats(s);
/* Outer loop over sink formats preserves the sink's preference order */
4001 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
4002 PA_IDXSET_FOREACH(f_in, in_formats, j) {
4003 if (pa_format_info_is_compatible(f_sink, f_in))
/* Store a copy: in_formats remains owned by the caller */
4004 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
4010 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
4015 /* Called from the main thread. */
4016 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
4017 pa_cvolume old_volume;
4018 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4019 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4024 old_volume = s->reference_volume;
4026 if (pa_cvolume_equal(volume, &old_volume))
4029 s->reference_volume = *volume;
4030 pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
4031 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
4032 s->flags & PA_SINK_DECIBEL_VOLUME),
4033 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
4034 s->flags & PA_SINK_DECIBEL_VOLUME));
4036 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4037 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);