2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37 #include <pulse/rtclock.h>
39 #include <pulsecore/sink-input.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-util.h>
42 #include <pulsecore/sample-util.h>
43 #include <pulsecore/core-subscribe.h>
44 #include <pulsecore/log.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/play-memblockq.h>
47 #include <pulsecore/flist.h>
/* Compile-time tunables for the sink mixing path.
 * NOTE(review): ABSOLUTE_MAX_LATENCY/DEFAULT_FIXED_LATENCY are expressed in
 * usec (PA_USEC_* factors); ABSOLUTE_MIN_LATENCY is a bare 500 — presumably
 * also usec, but confirm against the original file. */
51 #define MAX_MIX_CHANNELS 32
52 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
53 #define ABSOLUTE_MIN_LATENCY (500)
54 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
55 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* pa_sink is a public "class" derived from pa_msgobject (ref-counted message
 * target). */
57 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
/* Queued hardware-volume change (used by the PA_SINK_SYNC_VOLUME path).
 * NOTE(review): interior fields elided in this extract. */
59 struct pa_sink_volume_change {
63 PA_LLIST_FIELDS(pa_sink_volume_change);
/* Payload for the "set port" sink message. NOTE(review): fields elided. */
66 struct sink_message_set_port {
/* Forward declarations for file-local helpers defined below. */
71 static void sink_free(pa_object *s);
73 static void pa_sink_volume_change_push(pa_sink *s);
74 static void pa_sink_volume_change_flush(pa_sink *s);
75 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
/* Initialize a caller-provided pa_sink_new_data and allocate its proplist.
 * Returns the same pointer for call chaining. */
77 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
81 data->proplist = pa_proplist_new();
/* Set the requested sink name (copied). NOTE(review): the free of any
 * previous data->name is elided in this extract — confirm upstream. */
86 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
90 data->name = pa_xstrdup(name);
/* Set the sample spec; passing NULL clears the "is set" flag instead. */
93 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
96 if ((data->sample_spec_is_set = !!spec))
97 data->sample_spec = *spec;
/* Set the channel map; passing NULL clears the "is set" flag instead. */
100 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
103 if ((data->channel_map_is_set = !!map))
104 data->channel_map = *map;
/* Set the initial volume; passing NULL clears the "is set" flag instead. */
107 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
110 if ((data->volume_is_set = !!volume))
111 data->volume = *volume;
/* Set the initial mute state; !! normalizes any nonzero value to TRUE. */
114 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
117 data->muted_is_set = TRUE;
118 data->muted = !!mute;
/* Set the requested active port name, freeing any previously set one.
 * pa_xstrdup(NULL) yields NULL, so port == NULL clears the field. */
121 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
124 pa_xfree(data->active_port);
125 data->active_port = pa_xstrdup(port);
/* Release everything owned by a pa_sink_new_data: proplist, the port
 * hashmap and its entries, the name and the active-port string. */
128 void pa_sink_new_data_done(pa_sink_new_data *data) {
131 pa_proplist_free(data->proplist);
/* Steal-then-free each port so the hashmap never holds a dangling entry. */
136 while ((p = pa_hashmap_steal_first(data->ports)))
137 pa_device_port_free(p);
139 pa_hashmap_free(data->ports, NULL, NULL);
142 pa_xfree(data->name);
143 pa_xfree(data->active_port);
/* Allocate a pa_device_port with `extra` bytes of caller-owned trailing
 * storage (single allocation, aligned past the struct). Name/description
 * are copied. */
146 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
151 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
152 p->name = pa_xstrdup(name);
153 p->description = pa_xstrdup(description);
/* Free a port created by pa_device_port_new(). NOTE(review): the frees of
 * p->name and p itself are elided in this extract. */
160 void pa_device_port_free(pa_device_port *p) {
164 pa_xfree(p->description);
168 /* Called from main context */
/* Clear all implementor-provided callbacks so a freshly created (or reset)
 * sink has no driver hooks installed. */
169 static void reset_callbacks(pa_sink *s) {
173 s->get_volume = NULL;
174 s->set_volume = NULL;
177 s->request_rewind = NULL;
178 s->update_requested_latency = NULL;
182 /* Called from main context */
/* Create and register a new sink from `data`, fire the NEW and FIXATE
 * hooks, initialize main-thread and thread_info state, and create the
 * matching "<name>.monitor" source. Returns NULL on validation/hook
 * failure. NOTE(review): many interior lines (error cleanup paths, some
 * field initialization) are elided in this extract. */
183 pa_sink* pa_sink_new(
185 pa_sink_new_data *data,
186 pa_sink_flags_t flags) {
190 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
191 pa_source_new_data source_data;
197 pa_assert(data->name);
198 pa_assert_ctl_context();
200 s = pa_msgobject_new(pa_sink);
/* Register the name first; registration may canonicalize it. */
202 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
203 pa_log_debug("Failed to register name %s.", data->name);
208 pa_sink_new_data_set_name(data, name);
210 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
212 pa_namereg_unregister(core, name);
216 /* FIXME, need to free s here on failure */
/* Validate/fixate the new-data fields: UTF-8 names, sample spec, channel
 * map, volume; fill in defaults where the caller did not set them. */
218 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
219 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
221 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
223 if (!data->channel_map_is_set)
224 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
226 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
227 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
229 if (!data->volume_is_set)
230 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
232 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
233 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
235 if (!data->muted_is_set)
239 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
241 pa_device_init_description(data->proplist);
242 pa_device_init_icon(data->proplist, TRUE);
243 pa_device_init_intended_roles(data->proplist);
245 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
247 pa_namereg_unregister(core, name);
/* Populate the sink object proper from the fixated data. */
251 s->parent.parent.free = sink_free;
252 s->parent.process_msg = pa_sink_process_msg;
255 s->state = PA_SINK_INIT;
258 s->suspend_cause = 0;
259 s->name = pa_xstrdup(name);
260 s->proplist = pa_proplist_copy(data->proplist);
261 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
262 s->module = data->module;
263 s->card = data->card;
265 s->priority = pa_device_init_priority(s->proplist);
267 s->sample_spec = data->sample_spec;
268 s->channel_map = data->channel_map;
270 s->inputs = pa_idxset_new(NULL, NULL);
273 s->reference_volume = s->real_volume = data->volume;
274 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
275 s->base_volume = PA_VOLUME_NORM;
276 s->n_volume_steps = PA_VOLUME_NORM+1;
277 s->muted = data->muted;
278 s->refresh_volume = s->refresh_muted = FALSE;
285 /* As a minor optimization we just steal the list instead of
287 s->ports = data->ports;
/* Pick the active port: the requested one if it exists, otherwise the
 * highest-priority port in the map. */
290 s->active_port = NULL;
291 s->save_port = FALSE;
293 if (data->active_port && s->ports)
294 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
295 s->save_port = data->save_port;
297 if (!s->active_port && s->ports) {
301 PA_HASHMAP_FOREACH(p, s->ports, state)
302 if (!s->active_port || p->priority > s->active_port->priority)
306 s->save_volume = data->save_volume;
307 s->save_muted = data->save_muted;
309 pa_silence_memchunk_get(
310 &core->silence_cache,
/* thread_info: state mirrored into (and owned by) the IO thread. */
316 s->thread_info.rtpoll = NULL;
317 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
318 s->thread_info.soft_volume = s->soft_volume;
319 s->thread_info.soft_muted = s->muted;
320 s->thread_info.state = s->state;
321 s->thread_info.rewind_nbytes = 0;
322 s->thread_info.rewind_requested = FALSE;
323 s->thread_info.max_rewind = 0;
324 s->thread_info.max_request = 0;
325 s->thread_info.requested_latency_valid = FALSE;
326 s->thread_info.requested_latency = 0;
327 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
328 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
329 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
331 PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
332 s->thread_info.volume_changes_tail = NULL;
333 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
334 s->thread_info.volume_change_safety_margin = core->sync_volume_safety_margin_usec;
335 s->thread_info.volume_change_extra_delay = core->sync_volume_extra_delay_usec;
337 /* FIXME: This should probably be moved to pa_sink_put() */
338 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
341 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
343 pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
344 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
347 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
348 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Create the monitor source with latency flags translated from the sink. */
352 pa_source_new_data_init(&source_data);
353 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
354 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
355 source_data.name = pa_sprintf_malloc("%s.monitor", name);
356 source_data.driver = data->driver;
357 source_data.module = data->module;
358 source_data.card = data->card;
360 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
361 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
362 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
364 s->monitor_source = pa_source_new(core, &source_data,
365 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
366 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
368 pa_source_new_data_done(&source_data);
370 if (!s->monitor_source) {
376 s->monitor_source->monitor_of = s;
378 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
379 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
380 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
385 /* Called from main context */
/* Transition the sink to `state`: notify the implementor callback, send
 * SET_STATE to the IO thread (rolling back on failure), fire hooks, and
 * tell inputs/monitor about suspend/resume transitions.
 * NOTE(review): several interior lines elided — in particular the `else if
 * (i->suspend)` guard that upstream has before calling i->suspend(). */
386 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
388 pa_bool_t suspend_change;
389 pa_sink_state_t original_state;
392 pa_assert_ctl_context();
394 if (s->state == state)
397 original_state = s->state;
/* suspend_change := transition into or out of SUSPENDED. */
400 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
401 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
404 if ((ret = s->set_state(s, state)) < 0)
408 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
/* Roll back the implementor callback if the IO thread refused. */
411 s->set_state(s, original_state);
418 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the apropriate events */
419 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
420 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
423 if (suspend_change) {
427 /* We're suspending or resuming, tell everyone about it */
429 PA_IDXSET_FOREACH(i, s->inputs, idx)
430 if (s->state == PA_SINK_SUSPENDED &&
431 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
432 pa_sink_input_kill(i);
434 i->suspend(i, state == PA_SINK_SUSPENDED);
436 if (s->monitor_source)
437 pa_source_sync_suspend(s->monitor_source);
443 /* Called from main context */
/* Finalize sink setup after pa_sink_new(): fix up volume-related flags,
 * sanity-check the implementor's configuration, move the sink to IDLE,
 * publish the monitor source, and announce the new sink. */
444 void pa_sink_put(pa_sink* s) {
445 pa_sink_assert_ref(s);
446 pa_assert_ctl_context();
448 pa_assert(s->state == PA_SINK_INIT);
450 /* The following fields must be initialized properly when calling _put() */
451 pa_assert(s->asyncmsgq);
452 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
454 /* Generally, flags should be initialized via pa_sink_new(). As a
455 * special exception we allow volume related flags to be set
456 * between _new() and _put(). */
/* Software-only volume is always in dB; flat volume requires dB support. */
458 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
459 s->flags |= PA_SINK_DECIBEL_VOLUME;
461 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
462 s->flags |= PA_SINK_FLAT_VOLUME;
464 /* We assume that if the sink implementor changed the default
465 * volume he did so in real_volume, because that is the usual
466 * place where he is supposed to place his changes. */
467 s->reference_volume = s->real_volume;
469 s->thread_info.soft_volume = s->soft_volume;
470 s->thread_info.soft_muted = s->muted;
471 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
/* Consistency checks tying flags to the callbacks/latency setup the
 * implementor must have provided. */
473 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
474 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
475 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
476 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
477 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
478 pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
479 pa_assert(!(s->flags & PA_SINK_SYNC_VOLUME) || (s->flags & PA_SINK_HW_VOLUME_CTRL));
480 pa_assert(!(s->flags & PA_SINK_SYNC_VOLUME) || s->write_volume);
481 pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
483 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
484 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
485 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
487 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
489 pa_source_put(s->monitor_source);
491 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
492 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
495 /* Called from main context */
/* Detach the sink from the core: unregister its name, remove it from the
 * core/card sets, kill all inputs, move to UNLINKED, unlink the monitor
 * source and announce the removal. Designed to be idempotent. */
496 void pa_sink_unlink(pa_sink* s) {
498 pa_sink_input *i, *j = NULL;
501 pa_assert_ctl_context();
503 /* Please note that pa_sink_unlink() does more than simply
504 * reversing pa_sink_put(). It also undoes the registrations
505 * already done in pa_sink_new()! */
507 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
508 * may be called multiple times on the same sink without bad
511 linked = PA_SINK_IS_LINKED(s->state);
514 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
516 if (s->state != PA_SINK_UNLINKED)
517 pa_namereg_unregister(s->core, s->name);
518 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
521 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Killing an input removes it from s->inputs, so keep taking the first. */
523 while ((i = pa_idxset_first(s->inputs, NULL))) {
525 pa_sink_input_kill(i);
530 sink_set_state(s, PA_SINK_UNLINKED);
532 s->state = PA_SINK_UNLINKED;
536 if (s->monitor_source)
537 pa_source_unlink(s->monitor_source);
540 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
541 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
545 /* Called from main context */
/* Destructor invoked via parent.parent.free when the refcount drops to 0.
 * Releases the monitor source, input containers, silence block, proplist
 * and ports. NOTE(review): frees of name/driver/etc. elided here. */
546 static void sink_free(pa_object *o) {
547 pa_sink *s = PA_SINK(o);
551 pa_assert_ctl_context();
552 pa_assert(pa_sink_refcnt(s) == 0);
554 if (PA_SINK_IS_LINKED(s->state))
557 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
559 if (s->monitor_source) {
560 pa_source_unref(s->monitor_source);
561 s->monitor_source = NULL;
564 pa_idxset_free(s->inputs, NULL, NULL);
/* Drop the IO-thread-side references to inputs before freeing the map. */
566 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
567 pa_sink_input_unref(i);
569 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
571 if (s->silence.memblock)
572 pa_memblock_unref(s->silence.memblock);
578 pa_proplist_free(s->proplist);
583 while ((p = pa_hashmap_steal_first(s->ports)))
584 pa_device_port_free(p);
586 pa_hashmap_free(s->ports, NULL, NULL);
592 /* Called from main context, and not while the IO thread is active, please */
/* Install the message queue used to talk to the IO thread; mirrored onto
 * the monitor source. */
593 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
594 pa_sink_assert_ref(s);
595 pa_assert_ctl_context();
599 if (s->monitor_source)
600 pa_source_set_asyncmsgq(s->monitor_source, q);
603 /* Called from main context, and not while the IO thread is active, please */
/* Update the sink's flag bits selected by `mask` to the bits in `value`,
 * and propagate the latency-related flags to the monitor source, translating
 * each PA_SINK_* latency flag to its PA_SOURCE_* counterpart.
 *
 * BUGFIX: the `value` translation previously passed PA_SINK_DYNAMIC_LATENCY
 * through to pa_source_update_flags(); since sink and source flag constants
 * occupy different bit positions, the monitor source's dynamic-latency flag
 * was never set correctly. It must be PA_SOURCE_DYNAMIC_LATENCY, matching
 * the `mask` translation two lines above and the sink->source translation
 * in pa_sink_new(). */
604 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
605 pa_sink_assert_ref(s);
606 pa_assert_ctl_context();
611 /* For now, allow only a minimal set of flags to be changed. */
612 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
614 s->flags = (s->flags & ~mask) | (value & mask);
616 pa_source_update_flags(s->monitor_source,
617 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
618 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
619 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
620 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
623 /* Called from IO context, or before _put() from main context */
/* Install the rtpoll object the IO thread will run on; mirrored onto the
 * monitor source. */
624 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
625 pa_sink_assert_ref(s);
626 pa_sink_assert_io_context(s);
628 s->thread_info.rtpoll = p;
630 if (s->monitor_source)
631 pa_source_set_rtpoll(s->monitor_source, p);
634 /* Called from main context */
/* Re-evaluate RUNNING vs IDLE based on whether anything uses the sink.
 * A suspended sink is left alone. Returns sink_set_state()'s result. */
635 int pa_sink_update_status(pa_sink*s) {
636 pa_sink_assert_ref(s);
637 pa_assert_ctl_context();
638 pa_assert(PA_SINK_IS_LINKED(s->state));
640 if (s->state == PA_SINK_SUSPENDED)
643 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
646 /* Called from main context */
/* Add or remove `cause` from the suspend-cause bitmask (kept in sync on
 * the monitor source) and transition state if the overall suspended-ness
 * changed. `cause` must be a nonzero bit. */
647 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
648 pa_sink_assert_ref(s);
649 pa_assert_ctl_context();
650 pa_assert(PA_SINK_IS_LINKED(s->state));
651 pa_assert(cause != 0);
654 s->suspend_cause |= cause;
655 s->monitor_source->suspend_cause |= cause;
657 s->suspend_cause &= ~cause;
658 s->monitor_source->suspend_cause &= ~cause;
/* No state change needed if suspended-ness already matches the causes. */
661 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
664 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
666 if (s->suspend_cause)
667 return sink_set_state(s, PA_SINK_SUSPENDED);
669 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
672 /* Called from main context */
/* Begin moving every input away from this sink: each input that accepts
 * start_move is ref'ed and queued on `q` for pa_sink_move_all_finish().
 * NOTE(review): queue allocation / push lines are elided in this extract. */
673 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
674 pa_sink_input *i, *n;
677 pa_sink_assert_ref(s);
678 pa_assert_ctl_context();
679 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the successor before touching i, since start_move may remove i
 * from s->inputs. */
684 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
685 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
687 pa_sink_input_ref(i);
689 if (pa_sink_input_start_move(i) >= 0)
692 pa_sink_input_unref(i);
698 /* Called from main context */
/* Complete a move started by pa_sink_move_all_start(): reattach each queued
 * input to sink `s`, failing it if finish_move refuses; drop the queue
 * references and free the queue. */
699 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
702 pa_sink_assert_ref(s);
703 pa_assert_ctl_context();
704 pa_assert(PA_SINK_IS_LINKED(s->state));
707 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
708 if (pa_sink_input_finish_move(i, s, save) < 0)
709 pa_sink_input_fail_move(i);
711 pa_sink_input_unref(i);
714 pa_queue_free(q, NULL, NULL);
717 /* Called from main context */
/* Abort a move: fail every queued input, drop the queue references and
 * free the queue. */
718 void pa_sink_move_all_fail(pa_queue *q) {
721 pa_assert_ctl_context();
724 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
725 pa_sink_input_fail_move(i);
726 pa_sink_input_unref(i);
729 pa_queue_free(q, NULL, NULL);
732 /* Called from IO thread context */
/* Execute a rewind of `nbytes` on all attached inputs, the monitor source,
 * and (for SYNC_VOLUME sinks) the pending hardware volume-change queue. */
733 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
737 pa_sink_assert_ref(s);
738 pa_sink_assert_io_context(s);
739 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
741 /* If nobody requested this and this is actually no real rewind
742 * then we can short cut this. Please note that this means that
743 * not all rewind requests triggered upstream will always be
744 * translated in actual requests! */
745 if (!s->thread_info.rewind_requested && nbytes <= 0)
748 s->thread_info.rewind_nbytes = 0;
749 s->thread_info.rewind_requested = FALSE;
751 if (s->thread_info.state == PA_SINK_SUSPENDED)
755 pa_log_debug("Processing rewind...");
757 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
758 pa_sink_input_assert_ref(i);
759 pa_sink_input_process_rewind(i, nbytes);
763 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
764 pa_source_process_rewind(s->monitor_source, nbytes);
765 if (s->flags & PA_SINK_SYNC_VOLUME)
766 pa_sink_volume_change_rewind(s, nbytes);
770 /* Called from IO thread context */
/* Peek up to `maxinfo` inputs into the pa_mix_info array, shrinking
 * *length to the shortest chunk seen; each used entry holds a ref to its
 * input in userdata. Returns the number of entries filled.
 * NOTE(review): the return/advance lines are elided in this extract. */
771 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
775 size_t mixlength = *length;
777 pa_sink_assert_ref(s);
778 pa_sink_assert_io_context(s);
781 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
782 pa_sink_input_assert_ref(i);
784 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
786 if (mixlength == 0 || info->chunk.length < mixlength)
787 mixlength = info->chunk.length;
/* Pure-silence chunks contribute nothing to the mix; drop them early. */
789 if (pa_memblock_is_silence(info->chunk.memblock)) {
790 pa_memblock_unref(info->chunk.memblock);
794 info->userdata = pa_sink_input_ref(i);
796 pa_assert(info->chunk.memblock);
797 pa_assert(info->chunk.length > 0);
810 /* Called from IO thread context */
/* After rendering: advance every input by result->length, feed per-input
 * data to direct outputs on the monitor source, release the refs taken by
 * fill_mix_info(), and post the mixed result to the monitor source.
 * NOTE(review): several interior lines (index bookkeeping `p`, unref
 * accounting) are elided in this extract. */
811 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
815 unsigned n_unreffed = 0;
817 pa_sink_assert_ref(s);
818 pa_sink_assert_io_context(s);
820 pa_assert(result->memblock);
821 pa_assert(result->length > 0);
823 /* We optimize for the case where the order of the inputs has not changed */
825 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
827 pa_mix_info* m = NULL;
829 pa_sink_input_assert_ref(i);
831 /* Let's try to find the matching entry info the pa_mix_info array */
832 for (j = 0; j < n; j ++) {
834 if (info[p].userdata == i) {
/* Drop what we just played from the input's queue. */
845 pa_sink_input_drop(i, result->length);
847 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
849 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
/* Direct outputs get this input's own (volume-adjusted) chunk, not the
 * full mix. */
854 if (m && m->chunk.memblock) {
856 pa_memblock_ref(c.memblock);
857 pa_assert(result->length <= c.length);
858 c.length = result->length;
860 pa_memchunk_make_writable(&c, 0);
861 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
864 pa_memblock_ref(c.memblock);
865 pa_assert(result->length <= c.length);
866 c.length = result->length;
869 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
870 pa_source_output_assert_ref(o);
871 pa_assert(o->direct_on_input == i);
872 pa_source_post_direct(s->monitor_source, o, &c);
875 pa_memblock_unref(c.memblock);
880 if (m->chunk.memblock)
881 pa_memblock_unref(m->chunk.memblock);
882 pa_memchunk_reset(&m->chunk);
884 pa_sink_input_unref(m->userdata);
891 /* Now drop references to entries that are included in the
892 * pa_mix_info array but don't exist anymore */
894 if (n_unreffed < n) {
895 for (; n > 0; info++, n--) {
897 pa_sink_input_unref(info->userdata);
898 if (info->chunk.memblock)
899 pa_memblock_unref(info->chunk.memblock);
903 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
904 pa_source_post(s->monitor_source, result);
907 /* Called from IO thread context */
/* Render up to `length` bytes of mixed audio into *result (a new or shared
 * memblock). Suspended sinks return silence; zero inputs return the cached
 * silence chunk; one input short-circuits the mixer; otherwise pa_mix()
 * combines up to MAX_MIX_CHANNELS streams.
 * NOTE(review): several interior lines (n==0/n==1 branch heads, silence
 * call arguments) are elided in this extract. */
908 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
909 pa_mix_info info[MAX_MIX_CHANNELS];
911 size_t block_size_max;
913 pa_sink_assert_ref(s);
914 pa_sink_assert_io_context(s);
915 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
916 pa_assert(pa_frame_aligned(length, &s->sample_spec));
919 pa_assert(!s->thread_info.rewind_requested);
920 pa_assert(s->thread_info.rewind_nbytes == 0);
922 if (s->thread_info.state == PA_SINK_SUSPENDED) {
923 result->memblock = pa_memblock_ref(s->silence.memblock);
924 result->index = s->silence.index;
925 result->length = PA_MIN(s->silence.length, length);
/* Clamp the request to something frame-aligned that fits a mempool block. */
932 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
934 block_size_max = pa_mempool_block_size_max(s->core->mempool);
935 if (length > block_size_max)
936 length = pa_frame_align(block_size_max, &s->sample_spec);
938 pa_assert(length > 0);
940 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: nothing to mix, hand out the silence cache. */
944 *result = s->silence;
945 pa_memblock_ref(result->memblock);
947 if (result->length > length)
948 result->length = length;
/* n == 1: pass the single input's chunk through, applying soft volume. */
953 *result = info[0].chunk;
954 pa_memblock_ref(result->memblock);
956 if (result->length > length)
957 result->length = length;
959 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
961 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
962 pa_memblock_unref(result->memblock);
963 pa_silence_memchunk_get(&s->core->silence_cache,
968 } else if (!pa_cvolume_is_norm(&volume)) {
969 pa_memchunk_make_writable(result, 0);
970 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* n > 1: real mix into a fresh memblock. */
974 result->memblock = pa_memblock_new(s->core->mempool, length);
976 ptr = pa_memblock_acquire(result->memblock);
977 result->length = pa_mix(info, n,
980 &s->thread_info.soft_volume,
981 s->thread_info.soft_muted);
982 pa_memblock_release(result->memblock);
987 inputs_drop(s, info, n, result);
992 /* Called from IO thread context */
/* Like pa_sink_render(), but mix into the caller-provided chunk *target
 * (which must be writable and frame-aligned); target->length is shrunk to
 * the amount actually rendered. NOTE(review): some interior branch lines
 * are elided in this extract. */
993 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
994 pa_mix_info info[MAX_MIX_CHANNELS];
996 size_t length, block_size_max;
998 pa_sink_assert_ref(s);
999 pa_sink_assert_io_context(s);
1000 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1002 pa_assert(target->memblock);
1003 pa_assert(target->length > 0);
1004 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1006 pa_assert(!s->thread_info.rewind_requested);
1007 pa_assert(s->thread_info.rewind_nbytes == 0);
1009 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1010 pa_silence_memchunk(target, &s->sample_spec);
1016 length = target->length;
1017 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1018 if (length > block_size_max)
1019 length = pa_frame_align(block_size_max, &s->sample_spec);
1021 pa_assert(length > 0);
1023 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: no inputs — fill the target with silence. */
1026 if (target->length > length)
1027 target->length = length;
1029 pa_silence_memchunk(target, &s->sample_spec);
1030 } else if (n == 1) {
1033 if (target->length > length)
1034 target->length = length;
1036 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1038 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1039 pa_silence_memchunk(target, &s->sample_spec);
/* Copy the single input's chunk, volume-scaling a private copy first. */
1043 vchunk = info[0].chunk;
1044 pa_memblock_ref(vchunk.memblock);
1046 if (vchunk.length > length)
1047 vchunk.length = length;
1049 if (!pa_cvolume_is_norm(&volume)) {
1050 pa_memchunk_make_writable(&vchunk, 0);
1051 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1054 pa_memchunk_memcpy(target, &vchunk);
1055 pa_memblock_unref(vchunk.memblock);
/* n > 1: mix directly into the target block. */
1061 ptr = pa_memblock_acquire(target->memblock);
1063 target->length = pa_mix(info, n,
1064 (uint8_t*) ptr + target->index, length,
1066 &s->thread_info.soft_volume,
1067 s->thread_info.soft_muted);
1069 pa_memblock_release(target->memblock);
1072 inputs_drop(s, info, n, target);
1077 /* Called from IO thread context */
/* Fill *target completely: repeatedly call pa_sink_render_into() on the
 * remaining tail until target->length bytes are rendered. Suspended sinks
 * get silence. NOTE(review): the loop/offset bookkeeping around the
 * render_into call is elided in this extract. */
1078 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1082 pa_sink_assert_ref(s);
1083 pa_sink_assert_io_context(s);
1084 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1086 pa_assert(target->memblock);
1087 pa_assert(target->length > 0);
1088 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1090 pa_assert(!s->thread_info.rewind_requested);
1091 pa_assert(s->thread_info.rewind_nbytes == 0);
1093 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1094 pa_silence_memchunk(target, &s->sample_spec);
1107 pa_sink_render_into(s, &chunk);
1116 /* Called from IO thread context */
/* Render exactly `length` bytes into *result: a plain render first, then —
 * if it came up short — make the block writable at full size and top up
 * the tail with pa_sink_render_into_full(). */
1117 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1118 pa_sink_assert_ref(s);
1119 pa_sink_assert_io_context(s);
1120 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1121 pa_assert(length > 0);
1122 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1125 pa_assert(!s->thread_info.rewind_requested);
1126 pa_assert(s->thread_info.rewind_nbytes == 0);
1130 pa_sink_render(s, length, result);
1132 if (result->length < length) {
1135 pa_memchunk_make_writable(result, length);
1137 chunk.memblock = result->memblock;
1138 chunk.index = result->index + result->length;
1139 chunk.length = length - result->length;
1141 pa_sink_render_into_full(s, &chunk);
1143 result->length = length;
1149 /* Called from main thread */
/* Query the sink's current latency via a synchronous GET_LATENCY message
 * to the IO thread. Suspended sinks and sinks without PA_SINK_LATENCY
 * short-circuit (elided lines presumably return 0 — confirm upstream). */
1150 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1153 pa_sink_assert_ref(s);
1154 pa_assert_ctl_context();
1155 pa_assert(PA_SINK_IS_LINKED(s->state));
1157 /* The returned value is supposed to be in the time domain of the sound card! */
1159 if (s->state == PA_SINK_SUSPENDED)
1162 if (!(s->flags & PA_SINK_LATENCY))
1165 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1170 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): dispatches GET_LATENCY
 * directly through process_msg instead of the asyncmsgq. */
1171 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1175 pa_sink_assert_ref(s);
1176 pa_sink_assert_io_context(s);
1177 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1179 /* The returned value is supposed to be in the time domain of the sound card! */
1181 if (s->thread_info.state == PA_SINK_SUSPENDED)
1184 if (!(s->flags & PA_SINK_LATENCY))
1187 o = PA_MSGOBJECT(s);
1189 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1191 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
/* Remap *v from channel map `from` to `to` while minimizing cross-stream
 * impact: if `template` is already a valid remapping of v, use it; if the
 * maps differ otherwise, flatten v to its max across all channels (see the
 * inline rationale below). Returns v, or NULL on incompatible inputs. */
1197 static pa_cvolume* cvolume_remap_minimal_impact(
1199 const pa_cvolume *template,
1200 const pa_channel_map *from,
1201 const pa_channel_map *to) {
1206 pa_assert(template);
1210 pa_return_val_if_fail(pa_cvolume_compatible_with_channel_map(v, from), NULL);
1211 pa_return_val_if_fail(pa_cvolume_compatible_with_channel_map(template, to), NULL);
1213 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1214 * mapping from sink input to sink volumes:
1216 * If template is a possible remapping from v it is used instead
1217 * of remapping anew.
1219 * If the channel maps don't match we set an all-channel volume on
1220 * the sink to ensure that changing a volume on one stream has no
1221 * effect that cannot be compensated for in another stream that
1222 * does not have the same channel map as the sink. */
1224 if (pa_channel_map_equal(from, to))
1228 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1233 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1237 /* Called from main context */
/* For a flat-volume sink, recompute each input's reference_ratio as
 * i->volume / s->reference_volume (per channel, remapped to the input's
 * channel map), skipping channels where the sink volume is muted or the
 * ratio is already consistent. */
1238 static void compute_reference_ratios(pa_sink *s) {
1242 pa_sink_assert_ref(s);
1243 pa_assert_ctl_context();
1244 pa_assert(PA_SINK_IS_LINKED(s->state));
1245 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1247 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1249 pa_cvolume remapped;
1252 * Calculates the reference volume from the sink's reference
1253 * volume. This basically calculates:
1255 * i->reference_ratio = i->volume / s->reference_volume
1258 remapped = s->reference_volume;
1259 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1261 i->reference_ratio.channels = i->sample_spec.channels;
1263 for (c = 0; c < i->sample_spec.channels; c++) {
1265 /* We don't update when the sink volume is 0 anyway */
1266 if (remapped.values[c] <= PA_VOLUME_MUTED)
1269 /* Don't update the reference ratio unless necessary */
1270 if (pa_sw_volume_multiply(
1271 i->reference_ratio.values[c],
1272 remapped.values[c]) == i->volume.values[c])
1275 i->reference_ratio.values[c] = pa_sw_volume_divide(
1276 i->volume.values[c],
1277 remapped.values[c]);
1282 /* Called from main context */
/* For a flat-volume sink, recompute each input's real_ratio
 * (i->volume / s->real_volume) and soft_volume
 * (real_ratio * volume_factor), per channel in the input's channel map. */
1283 static void compute_real_ratios(pa_sink *s) {
1287 pa_sink_assert_ref(s);
1288 pa_assert_ctl_context();
1289 pa_assert(PA_SINK_IS_LINKED(s->state));
1290 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1292 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1294 pa_cvolume remapped;
1297 * This basically calculates:
1299 * i->real_ratio := i->volume / s->real_volume
1300 * i->soft_volume := i->real_ratio * i->volume_factor
1303 remapped = s->real_volume;
1304 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1306 i->real_ratio.channels = i->sample_spec.channels;
1307 i->soft_volume.channels = i->sample_spec.channels;
1309 for (c = 0; c < i->sample_spec.channels; c++) {
/* A muted sink channel forces the soft volume to mute; the ratio is
 * left as-is to avoid division by zero. */
1311 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1312 /* We leave i->real_ratio untouched */
1313 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1317 /* Don't lose accuracy unless necessary */
1318 if (pa_sw_volume_multiply(
1319 i->real_ratio.values[c],
1320 remapped.values[c]) != i->volume.values[c])
1322 i->real_ratio.values[c] = pa_sw_volume_divide(
1323 i->volume.values[c],
1324 remapped.values[c]);
1326 i->soft_volume.values[c] = pa_sw_volume_multiply(
1327 i->real_ratio.values[c],
1328 i->volume_factor.values[c]);
1331 /* We don't copy the soft_volume to the thread_info data
1332 * here. That must be done by the caller */
1336 /* Called from main thread */
1337 static void compute_real_volume(pa_sink *s) {
1341 pa_sink_assert_ref(s);
1342 pa_assert_ctl_context();
1343 pa_assert(PA_SINK_IS_LINKED(s->state));
1344 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1346 /* This determines the maximum volume of all streams and sets
1347 * s->real_volume accordingly. */
1349 if (pa_idxset_isempty(s->inputs)) {
1350 /* In the special case that we have no sink input we leave the
1351 * volume unmodified. */
1352 s->real_volume = s->reference_volume;
1356 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1358 /* First let's determine the new maximum volume of all inputs
1359 * connected to this sink */
1360 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1361 pa_cvolume remapped;
1363 remapped = i->volume;
1364 cvolume_remap_minimal_impact(&remapped, &s->real_volume, &i->channel_map, &s->channel_map);
1365 pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
1368 /* Then, let's update the real ratios/soft volumes of all inputs
1369 * connected to this sink */
1370 compute_real_ratios(s);
1373 /* Called from main thread */
1374 static void propagate_reference_volume(pa_sink *s) {
1378 pa_sink_assert_ref(s);
1379 pa_assert_ctl_context();
1380 pa_assert(PA_SINK_IS_LINKED(s->state));
1381 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1383 /* This is called whenever the sink volume changes that is not
1384 * caused by a sink input volume change. We need to fix up the
1385 * sink input volumes accordingly */
1387 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1388 pa_cvolume old_volume, remapped;
1390 old_volume = i->volume;
1392 /* This basically calculates:
1394 * i->volume := s->reference_volume * i->reference_ratio */
1396 remapped = s->reference_volume;
1397 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1398 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1400 /* The volume changed, let's tell people so */
1401 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1403 if (i->volume_changed)
1404 i->volume_changed(i);
1406 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1411 /* Called from main thread */
1412 void pa_sink_set_volume(
1414 const pa_cvolume *volume,
1418 pa_cvolume old_reference_volume;
1419 pa_bool_t reference_changed;
1421 pa_sink_assert_ref(s);
1422 pa_assert_ctl_context();
1423 pa_assert(PA_SINK_IS_LINKED(s->state));
1424 pa_assert(!volume || pa_cvolume_valid(volume));
1425 pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));
1426 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1428 /* make sure we don't change the volume when a PASSTHROUGH input is connected */
1429 if (s->flags & PA_SINK_PASSTHROUGH) {
1430 pa_sink_input *alt_i;
1433 /* one and only one PASSTHROUGH input can possibly be connected */
1434 if (pa_idxset_size(s->inputs) == 1) {
1436 alt_i = pa_idxset_first(s->inputs, &idx);
1438 if (alt_i->flags & PA_SINK_INPUT_PASSTHROUGH) {
1439 /* FIXME: Need to notify client that volume control is disabled */
1440 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
1446 /* As a special exception we accept mono volumes on all sinks --
1447 * even on those with more complex channel maps */
1449 /* If volume is NULL we synchronize the sink's real and reference
1450 * volumes with the stream volumes. If it is not NULL we update
1451 * the reference_volume with it. */
1453 old_reference_volume = s->reference_volume;
1457 if (pa_cvolume_compatible(volume, &s->sample_spec))
1458 s->reference_volume = *volume;
1460 pa_cvolume_scale(&s->reference_volume, pa_cvolume_max(volume));
1462 if (s->flags & PA_SINK_FLAT_VOLUME) {
1463 /* OK, propagate this volume change back to the inputs */
1464 propagate_reference_volume(s);
1466 /* And now recalculate the real volume */
1467 compute_real_volume(s);
1469 s->real_volume = s->reference_volume;
1472 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1474 /* Ok, let's determine the new real volume */
1475 compute_real_volume(s);
1477 /* Let's 'push' the reference volume if necessary */
1478 pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);
1480 /* We need to fix the reference ratios of all streams now that
1481 * we changed the reference volume */
1482 compute_reference_ratios(s);
1485 reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
1486 s->save_volume = (!reference_changed && s->save_volume) || save;
1488 if (s->set_volume) {
1489 /* If we have a function set_volume(), then we do not apply a
1490 * soft volume by default. However, set_volume() is free to
1491 * apply one to s->soft_volume */
1493 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1494 if (!(s->flags & PA_SINK_SYNC_VOLUME))
1500 /* If we have no function set_volume(), then the soft volume
1501 * becomes the virtual volume */
1502 s->soft_volume = s->real_volume;
1504 /* This tells the sink that soft and/or virtual volume changed */
1506 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL) == 0);
1508 if (reference_changed)
1509 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1512 /* Called from the io thread if sync volume is used, otherwise from the main thread.
1513 * Only to be called by sink implementor */
1514 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1515 pa_sink_assert_ref(s);
1516 if (s->flags & PA_SINK_SYNC_VOLUME)
1517 pa_sink_assert_io_context(s);
1519 pa_assert_ctl_context();
1522 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1524 s->soft_volume = *volume;
1526 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_SYNC_VOLUME))
1527 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1529 s->thread_info.soft_volume = s->soft_volume;
1532 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
1535 pa_cvolume old_reference_volume;
1537 pa_sink_assert_ref(s);
1538 pa_assert_ctl_context();
1539 pa_assert(PA_SINK_IS_LINKED(s->state));
1541 /* This is called when the hardware's real volume changes due to
1542 * some external event. We copy the real volume into our
1543 * reference volume and then rebuild the stream volumes based on
1544 * i->real_ratio which should stay fixed. */
1546 if (old_real_volume && pa_cvolume_equal(old_real_volume, &s->real_volume))
1549 old_reference_volume = s->reference_volume;
1551 /* 1. Make the real volume the reference volume */
1552 s->reference_volume = s->real_volume;
1554 if (s->flags & PA_SINK_FLAT_VOLUME) {
1556 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1557 pa_cvolume old_volume, remapped;
1559 old_volume = i->volume;
1561 /* 2. Since the sink's reference and real volumes are equal
1562 * now our ratios should be too. */
1563 i->reference_ratio = i->real_ratio;
1565 /* 3. Recalculate the new stream reference volume based on the
1566 * reference ratio and the sink's reference volume.
1568 * This basically calculates:
1570 * i->volume = s->reference_volume * i->reference_ratio
1572 * This is identical to propagate_reference_volume() */
1573 remapped = s->reference_volume;
1574 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1575 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1577 /* Notify if something changed */
1578 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1580 if (i->volume_changed)
1581 i->volume_changed(i);
1583 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1588 /* Something got changed in the hardware. It probably makes sense
1589 * to save changed hw settings given that hw volume changes not
1590 * triggered by PA are almost certainly done by the user. */
1591 s->save_volume = TRUE;
1593 if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
1594 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1597 /* Called from io thread */
1598 void pa_sink_update_volume_and_mute(pa_sink *s) {
1600 pa_sink_assert_io_context(s);
1602 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
1605 /* Called from main thread */
1606 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1607 pa_sink_assert_ref(s);
1608 pa_assert_ctl_context();
1609 pa_assert(PA_SINK_IS_LINKED(s->state));
1611 if (s->refresh_volume || force_refresh) {
1612 struct pa_cvolume old_real_volume;
1614 old_real_volume = s->real_volume;
1616 if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->get_volume)
1619 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1621 propagate_real_volume(s, &old_real_volume);
1624 return &s->reference_volume;
1627 /* Called from main thread */
1628 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1629 pa_cvolume old_real_volume;
1631 pa_sink_assert_ref(s);
1632 pa_assert_ctl_context();
1633 pa_assert(PA_SINK_IS_LINKED(s->state));
1635 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1637 old_real_volume = s->real_volume;
1638 s->real_volume = *new_real_volume;
1640 propagate_real_volume(s, &old_real_volume);
1643 /* Called from main thread */
1644 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1645 pa_bool_t old_muted;
1647 pa_sink_assert_ref(s);
1648 pa_assert_ctl_context();
1649 pa_assert(PA_SINK_IS_LINKED(s->state));
1651 old_muted = s->muted;
1653 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1655 if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->set_mute)
1658 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1660 if (old_muted != s->muted)
1661 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1664 /* Called from main thread */
1665 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1667 pa_sink_assert_ref(s);
1668 pa_assert_ctl_context();
1669 pa_assert(PA_SINK_IS_LINKED(s->state));
1671 if (s->refresh_muted || force_refresh) {
1672 pa_bool_t old_muted = s->muted;
1674 if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->get_mute)
1677 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1679 if (old_muted != s->muted) {
1680 s->save_muted = TRUE;
1682 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1684 /* Make sure the soft mute status stays in sync */
1685 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1692 /* Called from main thread */
1693 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1694 pa_sink_assert_ref(s);
1695 pa_assert_ctl_context();
1696 pa_assert(PA_SINK_IS_LINKED(s->state));
1698 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1700 if (s->muted == new_muted)
1703 s->muted = new_muted;
1704 s->save_muted = TRUE;
1706 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1709 /* Called from main thread */
1710 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1711 pa_sink_assert_ref(s);
1712 pa_assert_ctl_context();
1715 pa_proplist_update(s->proplist, mode, p);
1717 if (PA_SINK_IS_LINKED(s->state)) {
1718 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1719 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1725 /* Called from main thread */
1726 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1727 void pa_sink_set_description(pa_sink *s, const char *description) {
1729 pa_sink_assert_ref(s);
1730 pa_assert_ctl_context();
1732 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1735 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1737 if (old && description && pa_streq(old, description))
1741 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1743 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1745 if (s->monitor_source) {
1748 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1749 pa_source_set_description(s->monitor_source, n);
1753 if (PA_SINK_IS_LINKED(s->state)) {
1754 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1755 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1759 /* Called from main thread */
1760 unsigned pa_sink_linked_by(pa_sink *s) {
1763 pa_sink_assert_ref(s);
1764 pa_assert_ctl_context();
1765 pa_assert(PA_SINK_IS_LINKED(s->state));
1767 ret = pa_idxset_size(s->inputs);
1769 /* We add in the number of streams connected to us here. Please
1770 * note the asymmmetry to pa_sink_used_by()! */
1772 if (s->monitor_source)
1773 ret += pa_source_linked_by(s->monitor_source);
1778 /* Called from main thread */
1779 unsigned pa_sink_used_by(pa_sink *s) {
1782 pa_sink_assert_ref(s);
1783 pa_assert_ctl_context();
1784 pa_assert(PA_SINK_IS_LINKED(s->state));
1786 ret = pa_idxset_size(s->inputs);
1787 pa_assert(ret >= s->n_corked);
1789 /* Streams connected to our monitor source do not matter for
1790 * pa_sink_used_by()!.*/
1792 return ret - s->n_corked;
1795 /* Called from main thread */
1796 unsigned pa_sink_check_suspend(pa_sink *s) {
1801 pa_sink_assert_ref(s);
1802 pa_assert_ctl_context();
1804 if (!PA_SINK_IS_LINKED(s->state))
1809 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1810 pa_sink_input_state_t st;
1812 st = pa_sink_input_get_state(i);
1814 /* We do not assert here. It is perfectly valid for a sink input to
1815 * be in the INIT state (i.e. created, marked done but not yet put)
1816 * and we should not care if it's unlinked as it won't contribute
1817 * towarards our busy status.
1819 if (!PA_SINK_INPUT_IS_LINKED(st))
1822 if (st == PA_SINK_INPUT_CORKED)
1825 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1831 if (s->monitor_source)
1832 ret += pa_source_check_suspend(s->monitor_source);
1837 /* Called from the IO thread */
1838 static void sync_input_volumes_within_thread(pa_sink *s) {
1842 pa_sink_assert_ref(s);
1843 pa_sink_assert_io_context(s);
1845 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1846 if (pa_atomic_load(&i->before_ramping_v))
1847 i->thread_info.future_soft_volume = i->soft_volume;
1849 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1852 if (!pa_atomic_load(&i->before_ramping_v))
1853 i->thread_info.soft_volume = i->soft_volume;
1854 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1858 /* Called from IO thread, except when it is not */
1859 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1860 pa_sink *s = PA_SINK(o);
1861 pa_sink_assert_ref(s);
1863 switch ((pa_sink_message_t) code) {
1865 case PA_SINK_MESSAGE_ADD_INPUT: {
1866 pa_sink_input *i = PA_SINK_INPUT(userdata);
1868 /* If you change anything here, make sure to change the
1869 * sink input handling a few lines down at
1870 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1872 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1874 /* Since the caller sleeps in pa_sink_input_put(), we can
1875 * safely access data outside of thread_info even though
1878 if ((i->thread_info.sync_prev = i->sync_prev)) {
1879 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1880 pa_assert(i->sync_prev->sync_next == i);
1881 i->thread_info.sync_prev->thread_info.sync_next = i;
1884 if ((i->thread_info.sync_next = i->sync_next)) {
1885 pa_assert(i->sink == i->thread_info.sync_next->sink);
1886 pa_assert(i->sync_next->sync_prev == i);
1887 i->thread_info.sync_next->thread_info.sync_prev = i;
1890 pa_assert(!i->thread_info.attached);
1891 i->thread_info.attached = TRUE;
1896 pa_sink_input_set_state_within_thread(i, i->state);
1898 /* The requested latency of the sink input needs to be
1899 * fixed up and then configured on the sink */
1901 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1902 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1904 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1905 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1907 /* We don't rewind here automatically. This is left to the
1908 * sink input implementor because some sink inputs need a
1909 * slow start, i.e. need some time to buffer client
1910 * samples before beginning streaming. */
1912 /* In flat volume mode we need to update the volume as
1914 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
1917 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1918 pa_sink_input *i = PA_SINK_INPUT(userdata);
1920 /* If you change anything here, make sure to change the
1921 * sink input handling a few lines down at
1922 * PA_SINK_MESSAGE_PREPAPRE_MOVE, too. */
1927 pa_sink_input_set_state_within_thread(i, i->state);
1929 pa_assert(i->thread_info.attached);
1930 i->thread_info.attached = FALSE;
1932 /* Since the caller sleeps in pa_sink_input_unlink(),
1933 * we can safely access data outside of thread_info even
1934 * though it is mutable */
1936 pa_assert(!i->sync_prev);
1937 pa_assert(!i->sync_next);
1939 if (i->thread_info.sync_prev) {
1940 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1941 i->thread_info.sync_prev = NULL;
1944 if (i->thread_info.sync_next) {
1945 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1946 i->thread_info.sync_next = NULL;
1949 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1950 pa_sink_input_unref(i);
1952 pa_sink_invalidate_requested_latency(s, TRUE);
1953 pa_sink_request_rewind(s, (size_t) -1);
1955 /* In flat volume mode we need to update the volume as
1957 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
1960 case PA_SINK_MESSAGE_START_MOVE: {
1961 pa_sink_input *i = PA_SINK_INPUT(userdata);
1963 /* We don't support moving synchronized streams. */
1964 pa_assert(!i->sync_prev);
1965 pa_assert(!i->sync_next);
1966 pa_assert(!i->thread_info.sync_next);
1967 pa_assert(!i->thread_info.sync_prev);
1969 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1971 size_t sink_nbytes, total_nbytes;
1973 /* Get the latency of the sink */
1974 usec = pa_sink_get_latency_within_thread(s);
1975 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1976 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1978 if (total_nbytes > 0) {
1979 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1980 i->thread_info.rewrite_flush = TRUE;
1981 pa_sink_input_process_rewind(i, sink_nbytes);
1988 pa_assert(i->thread_info.attached);
1989 i->thread_info.attached = FALSE;
1991 /* Let's remove the sink input ...*/
1992 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1993 pa_sink_input_unref(i);
1995 pa_sink_invalidate_requested_latency(s, TRUE);
1997 pa_log_debug("Requesting rewind due to started move");
1998 pa_sink_request_rewind(s, (size_t) -1);
2000 /* In flat volume mode we need to update the volume as
2002 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2005 case PA_SINK_MESSAGE_FINISH_MOVE: {
2006 pa_sink_input *i = PA_SINK_INPUT(userdata);
2008 /* We don't support moving synchronized streams. */
2009 pa_assert(!i->sync_prev);
2010 pa_assert(!i->sync_next);
2011 pa_assert(!i->thread_info.sync_next);
2012 pa_assert(!i->thread_info.sync_prev);
2014 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2016 pa_assert(!i->thread_info.attached);
2017 i->thread_info.attached = TRUE;
2022 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2023 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2025 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2026 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2028 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2032 /* Get the latency of the sink */
2033 usec = pa_sink_get_latency_within_thread(s);
2034 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2037 pa_sink_input_drop(i, nbytes);
2039 pa_log_debug("Requesting rewind due to finished move");
2040 pa_sink_request_rewind(s, nbytes);
2043 /* In flat volume mode we need to update the volume as
2045 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2048 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2050 if (s->flags & PA_SINK_SYNC_VOLUME) {
2052 pa_sink_volume_change_push(s);
2054 /* Fall through ... */
2056 case PA_SINK_MESSAGE_SET_VOLUME:
2058 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2059 s->thread_info.soft_volume = s->soft_volume;
2060 pa_sink_request_rewind(s, (size_t) -1);
2063 if (!(s->flags & PA_SINK_FLAT_VOLUME))
2066 /* Fall through ... */
2068 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2069 sync_input_volumes_within_thread(s);
2072 case PA_SINK_MESSAGE_GET_VOLUME:
2074 if ((s->flags & PA_SINK_SYNC_VOLUME) && s->get_volume) {
2076 pa_sink_volume_change_flush(s);
2077 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2080 /* In case sink implementor reset SW volume. */
2081 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2082 s->thread_info.soft_volume = s->soft_volume;
2083 pa_sink_request_rewind(s, (size_t) -1);
2088 case PA_SINK_MESSAGE_SET_MUTE:
2090 if (s->thread_info.soft_muted != s->muted) {
2091 s->thread_info.soft_muted = s->muted;
2092 pa_sink_request_rewind(s, (size_t) -1);
2095 if (s->flags & PA_SINK_SYNC_VOLUME && s->set_mute)
2100 case PA_SINK_MESSAGE_GET_MUTE:
2102 if (s->flags & PA_SINK_SYNC_VOLUME && s->get_mute)
2107 case PA_SINK_MESSAGE_SET_STATE: {
2109 pa_bool_t suspend_change =
2110 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2111 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2113 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2115 if (s->thread_info.state == PA_SINK_SUSPENDED) {
2116 s->thread_info.rewind_nbytes = 0;
2117 s->thread_info.rewind_requested = FALSE;
2120 if (suspend_change) {
2124 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2125 if (i->suspend_within_thread)
2126 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2132 case PA_SINK_MESSAGE_DETACH:
2134 /* Detach all streams */
2135 pa_sink_detach_within_thread(s);
2138 case PA_SINK_MESSAGE_ATTACH:
2140 /* Reattach all streams */
2141 pa_sink_attach_within_thread(s);
2144 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2146 pa_usec_t *usec = userdata;
2147 *usec = pa_sink_get_requested_latency_within_thread(s);
2149 /* Yes, that's right, the IO thread will see -1 when no
2150 * explicit requested latency is configured, the main
2151 * thread will see max_latency */
2152 if (*usec == (pa_usec_t) -1)
2153 *usec = s->thread_info.max_latency;
2158 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2159 pa_usec_t *r = userdata;
2161 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2166 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2167 pa_usec_t *r = userdata;
2169 r[0] = s->thread_info.min_latency;
2170 r[1] = s->thread_info.max_latency;
2175 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2177 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2180 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2182 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2185 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2187 *((size_t*) userdata) = s->thread_info.max_rewind;
2190 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2192 *((size_t*) userdata) = s->thread_info.max_request;
2195 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2197 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2200 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2202 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2205 case PA_SINK_MESSAGE_SET_PORT:
2207 pa_assert(userdata);
2209 struct sink_message_set_port *msg_data = userdata;
2210 msg_data->ret = s->set_port(s, msg_data->port);
2214 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2215 /* This message is sent from IO-thread and handled in main thread. */
2216 pa_assert_ctl_context();
2218 pa_sink_get_volume(s, TRUE);
2219 pa_sink_get_mute(s, TRUE);
2222 case PA_SINK_MESSAGE_GET_LATENCY:
2223 case PA_SINK_MESSAGE_MAX:
2230 /* Called from main thread */
2231 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2236 pa_core_assert_ref(c);
2237 pa_assert_ctl_context();
2238 pa_assert(cause != 0);
2240 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2243 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2250 /* Called from main thread */
2251 void pa_sink_detach(pa_sink *s) {
2252 pa_sink_assert_ref(s);
2253 pa_assert_ctl_context();
2254 pa_assert(PA_SINK_IS_LINKED(s->state));
2256 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2259 /* Called from main thread */
2260 void pa_sink_attach(pa_sink *s) {
2261 pa_sink_assert_ref(s);
2262 pa_assert_ctl_context();
2263 pa_assert(PA_SINK_IS_LINKED(s->state));
2265 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2268 /* Called from IO thread */
2269 void pa_sink_detach_within_thread(pa_sink *s) {
2273 pa_sink_assert_ref(s);
2274 pa_sink_assert_io_context(s);
2275 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2277 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2281 if (s->monitor_source)
2282 pa_source_detach_within_thread(s->monitor_source);
2285 /* Called from IO thread */
2286 void pa_sink_attach_within_thread(pa_sink *s) {
2290 pa_sink_assert_ref(s);
2291 pa_sink_assert_io_context(s);
2292 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2294 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2298 if (s->monitor_source)
2299 pa_source_attach_within_thread(s->monitor_source);
2302 /* Called from IO thread */
2303 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2304 pa_sink_assert_ref(s);
2305 pa_sink_assert_io_context(s);
2306 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2308 if (s->thread_info.state == PA_SINK_SUSPENDED)
2311 if (nbytes == (size_t) -1)
2312 nbytes = s->thread_info.max_rewind;
2314 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2316 if (s->thread_info.rewind_requested &&
2317 nbytes <= s->thread_info.rewind_nbytes)
2320 s->thread_info.rewind_nbytes = nbytes;
2321 s->thread_info.rewind_requested = TRUE;
2323 if (s->request_rewind)
2324 s->request_rewind(s);
2327 /* Called from IO thread */
2328 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2329 pa_usec_t result = (pa_usec_t) -1;
2332 pa_usec_t monitor_latency;
2334 pa_sink_assert_ref(s);
2335 pa_sink_assert_io_context(s);
2337 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2338 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2340 if (s->thread_info.requested_latency_valid)
2341 return s->thread_info.requested_latency;
2343 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2344 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2345 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2346 result = i->thread_info.requested_sink_latency;
2348 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2350 if (monitor_latency != (pa_usec_t) -1 &&
2351 (result == (pa_usec_t) -1 || result > monitor_latency))
2352 result = monitor_latency;
2354 if (result != (pa_usec_t) -1)
2355 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2357 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2358 /* Only cache if properly initialized */
2359 s->thread_info.requested_latency = result;
2360 s->thread_info.requested_latency_valid = TRUE;
2366 /* Called from main thread */
2367 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2370 pa_sink_assert_ref(s);
2371 pa_assert_ctl_context();
2372 pa_assert(PA_SINK_IS_LINKED(s->state));
2374 if (s->state == PA_SINK_SUSPENDED)
2377 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2381 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2382 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2386 pa_sink_assert_ref(s);
2387 pa_sink_assert_io_context(s);
2389 if (max_rewind == s->thread_info.max_rewind)
2392 s->thread_info.max_rewind = max_rewind;
2394 if (PA_SINK_IS_LINKED(s->thread_info.state))
2395 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2396 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2398 if (s->monitor_source)
2399 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2402 /* Called from main thread */
2403 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2404 pa_sink_assert_ref(s);
2405 pa_assert_ctl_context();
2407 if (PA_SINK_IS_LINKED(s->state))
2408 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2410 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2413 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2414 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2417 pa_sink_assert_ref(s);
2418 pa_sink_assert_io_context(s);
2420 if (max_request == s->thread_info.max_request)
2423 s->thread_info.max_request = max_request;
2425 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2428 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2429 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2433 /* Called from main thread */
2434 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2435 pa_sink_assert_ref(s);
2436 pa_assert_ctl_context();
2438 if (PA_SINK_IS_LINKED(s->state))
2439 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2441 pa_sink_set_max_request_within_thread(s, max_request);
2444 /* Called from IO thread */
2445 void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
2449 pa_sink_assert_ref(s);
2450 pa_sink_assert_io_context(s);
2452 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
2453 s->thread_info.requested_latency_valid = FALSE;
2457 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2459 if (s->update_requested_latency)
2460 s->update_requested_latency(s);
2462 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2463 if (i->update_sink_requested_latency)
2464 i->update_sink_requested_latency(i);
2468 /* Called from main thread */
2469 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2470 pa_sink_assert_ref(s);
2471 pa_assert_ctl_context();
2473 /* min_latency == 0: no limit
2474 * min_latency anything else: specified limit
2476 * Similar for max_latency */
2478 if (min_latency < ABSOLUTE_MIN_LATENCY)
2479 min_latency = ABSOLUTE_MIN_LATENCY;
2481 if (max_latency <= 0 ||
2482 max_latency > ABSOLUTE_MAX_LATENCY)
2483 max_latency = ABSOLUTE_MAX_LATENCY;
2485 pa_assert(min_latency <= max_latency);
2487 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2488 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2489 max_latency == ABSOLUTE_MAX_LATENCY) ||
2490 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2492 if (PA_SINK_IS_LINKED(s->state)) {
2498 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2500 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2503 /* Called from main thread */
2504 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2505 pa_sink_assert_ref(s);
2506 pa_assert_ctl_context();
2507 pa_assert(min_latency);
2508 pa_assert(max_latency);
2510 if (PA_SINK_IS_LINKED(s->state)) {
2511 pa_usec_t r[2] = { 0, 0 };
2513 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2515 *min_latency = r[0];
2516 *max_latency = r[1];
2518 *min_latency = s->thread_info.min_latency;
2519 *max_latency = s->thread_info.max_latency;
2523 /* Called from IO thread */
2524 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2525 pa_sink_assert_ref(s);
2526 pa_sink_assert_io_context(s);
2528 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2529 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2530 pa_assert(min_latency <= max_latency);
2532 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2533 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2534 max_latency == ABSOLUTE_MAX_LATENCY) ||
2535 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2537 if (s->thread_info.min_latency == min_latency &&
2538 s->thread_info.max_latency == max_latency)
2541 s->thread_info.min_latency = min_latency;
2542 s->thread_info.max_latency = max_latency;
2544 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2548 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2549 if (i->update_sink_latency_range)
2550 i->update_sink_latency_range(i);
2553 pa_sink_invalidate_requested_latency(s, FALSE);
2555 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2558 /* Called from main thread */
2559 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2560 pa_sink_assert_ref(s);
2561 pa_assert_ctl_context();
2563 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2564 pa_assert(latency == 0);
2568 if (latency < ABSOLUTE_MIN_LATENCY)
2569 latency = ABSOLUTE_MIN_LATENCY;
2571 if (latency > ABSOLUTE_MAX_LATENCY)
2572 latency = ABSOLUTE_MAX_LATENCY;
2574 if (PA_SINK_IS_LINKED(s->state))
2575 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2577 s->thread_info.fixed_latency = latency;
2579 pa_source_set_fixed_latency(s->monitor_source, latency);
2582 /* Called from main thread */
2583 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2586 pa_sink_assert_ref(s);
2587 pa_assert_ctl_context();
2589 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2592 if (PA_SINK_IS_LINKED(s->state))
2593 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2595 latency = s->thread_info.fixed_latency;
2600 /* Called from IO thread */
2601 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
2602 pa_sink_assert_ref(s);
2603 pa_sink_assert_io_context(s);
2605 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2606 pa_assert(latency == 0);
2610 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2611 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2613 if (s->thread_info.fixed_latency == latency)
2616 s->thread_info.fixed_latency = latency;
2618 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2622 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2623 if (i->update_sink_fixed_latency)
2624 i->update_sink_fixed_latency(i);
2627 pa_sink_invalidate_requested_latency(s, FALSE);
2629 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
2632 /* Called from main context */
2633 size_t pa_sink_get_max_rewind(pa_sink *s) {
2635 pa_sink_assert_ref(s);
2636 pa_assert_ctl_context();
2638 if (!PA_SINK_IS_LINKED(s->state))
2639 return s->thread_info.max_rewind;
2641 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2646 /* Called from main context */
2647 size_t pa_sink_get_max_request(pa_sink *s) {
2649 pa_sink_assert_ref(s);
2650 pa_assert_ctl_context();
2652 if (!PA_SINK_IS_LINKED(s->state))
2653 return s->thread_info.max_request;
2655 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2660 /* Called from main context */
2661 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2662 pa_device_port *port;
2664 pa_sink_assert_ref(s);
2665 pa_assert_ctl_context();
2668 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2669 return -PA_ERR_NOTIMPLEMENTED;
2673 return -PA_ERR_NOENTITY;
2675 if (!(port = pa_hashmap_get(s->ports, name)))
2676 return -PA_ERR_NOENTITY;
2678 if (s->active_port == port) {
2679 s->save_port = s->save_port || save;
2683 if (s->flags & PA_SINK_SYNC_VOLUME) {
2684 struct sink_message_set_port msg = { .port = port, .ret = 0 };
2685 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
2689 ret = s->set_port(s, port);
2692 return -PA_ERR_NOENTITY;
2694 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2696 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2698 s->active_port = port;
2699 s->save_port = save;
2704 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2705 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2709 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2712 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2714 if (pa_streq(ff, "microphone"))
2715 t = "audio-input-microphone";
2716 else if (pa_streq(ff, "webcam"))
2718 else if (pa_streq(ff, "computer"))
2720 else if (pa_streq(ff, "handset"))
2722 else if (pa_streq(ff, "portable"))
2723 t = "multimedia-player";
2724 else if (pa_streq(ff, "tv"))
2725 t = "video-display";
2728 * The following icons are not part of the icon naming spec,
2729 * because Rodney Dawes sucks as the maintainer of that spec.
2731 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2733 else if (pa_streq(ff, "headset"))
2734 t = "audio-headset";
2735 else if (pa_streq(ff, "headphone"))
2736 t = "audio-headphones";
2737 else if (pa_streq(ff, "speaker"))
2738 t = "audio-speakers";
2739 else if (pa_streq(ff, "hands-free"))
2740 t = "audio-handsfree";
2744 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2745 if (pa_streq(c, "modem"))
2752 t = "audio-input-microphone";
2755 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2756 if (strstr(profile, "analog"))
2758 else if (strstr(profile, "iec958"))
2760 else if (strstr(profile, "hdmi"))
2764 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2766 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2771 pa_bool_t pa_device_init_description(pa_proplist *p) {
2772 const char *s, *d = NULL, *k;
2775 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2778 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2779 if (pa_streq(s, "internal"))
2780 d = _("Internal Audio");
2783 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2784 if (pa_streq(s, "modem"))
2788 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2793 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2796 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2798 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2803 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2807 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2810 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2811 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
2812 || pa_streq(s, "headset")) {
2813 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2820 unsigned pa_device_init_priority(pa_proplist *p) {
2822 unsigned priority = 0;
2826 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
2828 if (pa_streq(s, "sound"))
2830 else if (!pa_streq(s, "modem"))
2834 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2836 if (pa_streq(s, "internal"))
2838 else if (pa_streq(s, "speaker"))
2840 else if (pa_streq(s, "headphone"))
2844 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
2846 if (pa_streq(s, "pci"))
2848 else if (pa_streq(s, "usb"))
2850 else if (pa_streq(s, "bluetooth"))
2854 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2856 if (pa_startswith(s, "analog-"))
2858 else if (pa_startswith(s, "iec958-"))
2865 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
2867 /* Called from the IO thread. */
2868 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
2869 pa_sink_volume_change *c;
2870 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
2871 c = pa_xnew(pa_sink_volume_change, 1);
2873 PA_LLIST_INIT(pa_sink_volume_change, c);
2875 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
2879 /* Called from the IO thread. */
2880 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
2882 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
2886 /* Called from the IO thread. */
2887 void pa_sink_volume_change_push(pa_sink *s) {
2888 pa_sink_volume_change *c = NULL;
2889 pa_sink_volume_change *nc = NULL;
2890 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
2892 const char *direction = NULL;
2895 nc = pa_sink_volume_change_new(s);
2897 /* NOTE: There is already more different volumes in pa_sink that I can remember.
2898 * Adding one more volume for HW would get us rid of this, but I am trying
2899 * to survive with the ones we already have. */
2900 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
2902 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
2903 pa_log_debug("Volume not changing");
2904 pa_sink_volume_change_free(nc);
2908 /* Get the latency of the sink */
2909 if (PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &nc->at, 0, NULL) < 0)
2912 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
2914 if (s->thread_info.volume_changes_tail) {
2915 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
2916 /* If volume is going up let's do it a bit late. If it is going
2917 * down let's do it a bit early. */
2918 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
2919 if (nc->at + safety_margin > c->at) {
2920 nc->at += safety_margin;
2925 else if (nc->at - safety_margin > c->at) {
2926 nc->at -= safety_margin;
2934 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
2935 nc->at += safety_margin;
2938 nc->at -= safety_margin;
2941 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
2944 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
2947 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), nc->at);
2949 /* We can ignore volume events that came earlier but should happen later than this. */
2950 PA_LLIST_FOREACH(c, nc->next) {
2951 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), c->at);
2952 pa_sink_volume_change_free(c);
2955 s->thread_info.volume_changes_tail = nc;
2958 /* Called from the IO thread. */
2959 static void pa_sink_volume_change_flush(pa_sink *s) {
2960 pa_sink_volume_change *c = s->thread_info.volume_changes;
2962 s->thread_info.volume_changes = NULL;
2963 s->thread_info.volume_changes_tail = NULL;
2965 pa_sink_volume_change *next = c->next;
2966 pa_sink_volume_change_free(c);
2971 /* Called from the IO thread. */
2972 pa_bool_t pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
2973 pa_usec_t now = pa_rtclock_now();
2974 pa_bool_t ret = FALSE;
2977 pa_assert(s->write_volume);
2979 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
2980 pa_sink_volume_change *c = s->thread_info.volume_changes;
2981 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
2982 pa_log_debug("Volume change to %d at %llu was written %llu usec late", pa_cvolume_avg(&c->hw_volume), c->at, now - c->at);
2984 s->thread_info.current_hw_volume = c->hw_volume;
2985 pa_sink_volume_change_free(c);
2988 if (s->write_volume && ret)
2991 if (s->thread_info.volume_changes) {
2993 *usec_to_next = s->thread_info.volume_changes->at - now;
2994 if (pa_log_ratelimit())
2995 pa_log_debug("Next volume change in %lld usec", s->thread_info.volume_changes->at - now);
3000 s->thread_info.volume_changes_tail = NULL;
3005 /* Called from the IO thread. */
3006 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3007 /* All the queued volume events later than current latency are shifted to happen earlier. */
3008 pa_sink_volume_change *c;
3009 pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3012 /* Get the latency of the sink */
3013 if (PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &limit, 0, NULL) < 0)
3016 limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3018 PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3019 if (c->at > limit) {