2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
55 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
57 static void sink_free(pa_object *s);
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
63 data->proplist = pa_proplist_new();
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
72 data->name = pa_xstrdup(name);
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
/* Record an explicit initial mute state for a sink under construction.
 * Marks muted_is_set so pa_sink_new() knows a value was supplied and
 * will not apply its default. */
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
99 data->muted_is_set = TRUE;
/* Normalize to a canonical 0/1 boolean. */
100 data->muted = !!mute;
/* Set (or clear, with port == NULL) the initially active port of a sink
 * under construction. Takes its own copy of the string. */
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
/* Free any previously stored port name before replacing it, so repeated
 * calls do not leak. pa_xstrdup(NULL) returns NULL, clearing the field. */
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
113 pa_proplist_free(data->proplist);
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
121 pa_hashmap_free(data->ports, NULL, NULL);
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
/* Release a pa_device_port allocated by pa_device_port_new().
 * NOTE(review): only the description free is visible here; p->name and the
 * struct itself are presumably released in elided lines — confirm against
 * the full source. */
142 void pa_device_port_free(pa_device_port *p) {
146 pa_xfree(p->description);
150 /* Called from main context */
151 static void reset_callbacks(pa_sink *s) {
155 s->get_volume = NULL;
156 s->set_volume = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
164 /* Called from main context */
165 pa_sink* pa_sink_new(
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
179 pa_assert(data->name);
180 pa_assert_ctl_context();
182 s = pa_msgobject_new(pa_sink);
184 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
185 pa_log_debug("Failed to register name %s.", data->name);
190 pa_sink_new_data_set_name(data, name);
192 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
194 pa_namereg_unregister(core, name);
198 /* FIXME, need to free s here on failure */
200 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
201 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
203 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
205 if (!data->channel_map_is_set)
206 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
208 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
209 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
211 if (!data->volume_is_set)
212 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
214 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
215 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
217 if (!data->muted_is_set)
221 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
223 pa_device_init_description(data->proplist);
224 pa_device_init_icon(data->proplist, TRUE);
225 pa_device_init_intended_roles(data->proplist);
227 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
229 pa_namereg_unregister(core, name);
233 s->parent.parent.free = sink_free;
234 s->parent.process_msg = pa_sink_process_msg;
237 s->state = PA_SINK_INIT;
240 s->suspend_cause = 0;
241 s->name = pa_xstrdup(name);
242 s->proplist = pa_proplist_copy(data->proplist);
243 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
244 s->module = data->module;
245 s->card = data->card;
247 s->priority = pa_device_init_priority(s->proplist);
249 s->sample_spec = data->sample_spec;
250 s->channel_map = data->channel_map;
252 s->inputs = pa_idxset_new(NULL, NULL);
255 s->reference_volume = s->real_volume = data->volume;
256 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
257 s->base_volume = PA_VOLUME_NORM;
258 s->n_volume_steps = PA_VOLUME_NORM+1;
259 s->muted = data->muted;
260 s->refresh_volume = s->refresh_muted = FALSE;
267 /* As a minor optimization we just steal the list instead of
269 s->ports = data->ports;
272 s->active_port = NULL;
273 s->save_port = FALSE;
275 if (data->active_port && s->ports)
276 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
277 s->save_port = data->save_port;
279 if (!s->active_port && s->ports) {
283 PA_HASHMAP_FOREACH(p, s->ports, state)
284 if (!s->active_port || p->priority > s->active_port->priority)
288 s->save_volume = data->save_volume;
289 s->save_muted = data->save_muted;
291 pa_silence_memchunk_get(
292 &core->silence_cache,
298 s->thread_info.rtpoll = NULL;
299 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
300 s->thread_info.soft_volume = s->soft_volume;
301 s->thread_info.soft_muted = s->muted;
302 s->thread_info.state = s->state;
303 s->thread_info.rewind_nbytes = 0;
304 s->thread_info.rewind_requested = FALSE;
305 s->thread_info.max_rewind = 0;
306 s->thread_info.max_request = 0;
307 s->thread_info.requested_latency_valid = FALSE;
308 s->thread_info.requested_latency = 0;
309 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
310 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
311 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
313 /* FIXME: This should probably be moved to pa_sink_put() */
314 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
317 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
319 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
320 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
323 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
324 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
328 pa_source_new_data_init(&source_data);
329 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
330 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
331 source_data.name = pa_sprintf_malloc("%s.monitor", name);
332 source_data.driver = data->driver;
333 source_data.module = data->module;
334 source_data.card = data->card;
336 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
337 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
338 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
340 s->monitor_source = pa_source_new(core, &source_data,
341 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
342 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
344 pa_source_new_data_done(&source_data);
346 if (!s->monitor_source) {
352 s->monitor_source->monitor_of = s;
354 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
355 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
356 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
361 /* Called from main context */
/* Move the sink to a new lifecycle state (INIT/IDLE/RUNNING/SUSPENDED/
 * UNLINKED). Notifies the implementor via s->set_state(), synchronously
 * informs the IO thread, fires hooks/subscription events, and tells all
 * sink inputs about suspend/resume transitions. */
362 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
364 pa_bool_t suspend_change;
365 pa_sink_state_t original_state;
368 pa_assert_ctl_context();
/* No-op if we are already in the requested state. */
370 if (s->state == state)
373 original_state = s->state;
/* A "suspend change" is any transition between SUSPENDED and an opened
 * (IDLE/RUNNING) state, in either direction. */
376 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
377 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
/* Give the implementor a chance to veto or prepare for the transition. */
380 if ((ret = s->set_state(s, state)) < 0)
/* Synchronously tell the IO thread about the new state. */
384 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
/* The IO thread rejected the change: roll the implementor back to the
 * previous state. */
387 s->set_state(s, original_state);
394 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
395 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
396 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
399 if (suspend_change) {
403 /* We're suspending or resuming, tell everyone about it */
405 PA_IDXSET_FOREACH(i, s->inputs, idx)
/* Inputs flagged KILL_ON_SUSPEND are terminated instead of suspended. */
406 if (s->state == PA_SINK_SUSPENDED &&
407 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
408 pa_sink_input_kill(i);
/* NOTE(review): an elided else-branch presumably guards this call on
 * i->suspend being non-NULL — confirm against the full source. */
410 i->suspend(i, state == PA_SINK_SUSPENDED);
/* Keep the monitor source's suspend state in sync with ours. */
412 if (s->monitor_source)
413 pa_source_sync_suspend(s->monitor_source);
419 /* Called from main context */
420 void pa_sink_put(pa_sink* s) {
421 pa_sink_assert_ref(s);
422 pa_assert_ctl_context();
424 pa_assert(s->state == PA_SINK_INIT);
426 /* The following fields must be initialized properly when calling _put() */
427 pa_assert(s->asyncmsgq);
428 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
430 /* Generally, flags should be initialized via pa_sink_new(). As a
431 * special exception we allow volume related flags to be set
432 * between _new() and _put(). */
434 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
435 s->flags |= PA_SINK_DECIBEL_VOLUME;
437 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
438 s->flags |= PA_SINK_FLAT_VOLUME;
440 /* We assume that if the sink implementor changed the default
441 * volume he did so in real_volume, because that is the usual
442 * place where he is supposed to place his changes. */
443 s->reference_volume = s->real_volume;
445 s->thread_info.soft_volume = s->soft_volume;
446 s->thread_info.soft_muted = s->muted;
448 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
449 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
450 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
451 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
452 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
454 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
455 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
456 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
458 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
460 pa_source_put(s->monitor_source);
462 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
463 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
466 /* Called from main context */
467 void pa_sink_unlink(pa_sink* s) {
469 pa_sink_input *i, *j = NULL;
472 pa_assert_ctl_context();
474 /* Please note that pa_sink_unlink() does more than simply
475 * reversing pa_sink_put(). It also undoes the registrations
476 * already done in pa_sink_new()! */
478 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
479 * may be called multiple times on the same sink without bad
482 linked = PA_SINK_IS_LINKED(s->state);
485 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
487 if (s->state != PA_SINK_UNLINKED)
488 pa_namereg_unregister(s->core, s->name);
489 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
492 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
494 while ((i = pa_idxset_first(s->inputs, NULL))) {
496 pa_sink_input_kill(i);
501 sink_set_state(s, PA_SINK_UNLINKED);
503 s->state = PA_SINK_UNLINKED;
507 if (s->monitor_source)
508 pa_source_unlink(s->monitor_source);
511 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
512 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
516 /* Called from main context */
517 static void sink_free(pa_object *o) {
518 pa_sink *s = PA_SINK(o);
522 pa_assert_ctl_context();
523 pa_assert(pa_sink_refcnt(s) == 0);
525 if (PA_SINK_IS_LINKED(s->state))
528 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
530 if (s->monitor_source) {
531 pa_source_unref(s->monitor_source);
532 s->monitor_source = NULL;
535 pa_idxset_free(s->inputs, NULL, NULL);
537 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
538 pa_sink_input_unref(i);
540 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
542 if (s->silence.memblock)
543 pa_memblock_unref(s->silence.memblock);
549 pa_proplist_free(s->proplist);
554 while ((p = pa_hashmap_steal_first(s->ports)))
555 pa_device_port_free(p);
557 pa_hashmap_free(s->ports, NULL, NULL);
563 /* Called from main context, and not while the IO thread is active, please */
/* Install the asyncmsgq used for main-thread <-> IO-thread messaging.
 * Must be set before pa_sink_put() (pa_sink_put() asserts s->asyncmsgq). */
564 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
565 pa_sink_assert_ref(s);
566 pa_assert_ctl_context();
/* The monitor source shares the sink's message queue so both are driven
 * by the same IO thread. */
570 if (s->monitor_source)
571 pa_source_set_asyncmsgq(s->monitor_source, q);
574 /* Called from main context, and not while the IO thread is active, please */
575 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
576 pa_sink_assert_ref(s);
577 pa_assert_ctl_context();
582 /* For now, allow only a minimal set of flags to be changed. */
583 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
585 s->flags = (s->flags & ~mask) | (value & mask);
587 pa_source_update_flags(s->monitor_source,
588 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
589 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
590 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
591 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
594 /* Called from IO context, or before _put() from main context */
/* Install the rtpoll object the IO thread will run this sink on. */
595 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
596 pa_sink_assert_ref(s);
597 pa_sink_assert_io_context(s);
599 s->thread_info.rtpoll = p;
/* The monitor source runs on the same rtpoll/IO thread as the sink. */
601 if (s->monitor_source)
602 pa_source_set_rtpoll(s->monitor_source, p);
605 /* Called from main context */
/* Re-evaluate whether the sink should be RUNNING or IDLE, based on
 * whether any streams are connected. Returns sink_set_state()'s result. */
606 int pa_sink_update_status(pa_sink*s) {
607 pa_sink_assert_ref(s);
608 pa_assert_ctl_context();
609 pa_assert(PA_SINK_IS_LINKED(s->state));
/* A suspended sink stays suspended regardless of usage; resuming is
 * handled by pa_sink_suspend(), not here. */
611 if (s->state == PA_SINK_SUSPENDED)
/* RUNNING when at least one stream uses the sink, IDLE otherwise. */
614 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
617 /* Called from main context */
/* Add or remove one suspend cause bit and update the sink's state
 * accordingly. The sink stays suspended as long as any cause bit is set.
 * The cause bits are mirrored onto the monitor source. */
618 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
619 pa_sink_assert_ref(s);
620 pa_assert_ctl_context();
621 pa_assert(PA_SINK_IS_LINKED(s->state));
622 pa_assert(cause != 0);
/* NOTE(review): an elided if (suspend) / else presumably selects between
 * these two pairs — the |= pair sets the cause, the &= ~ pair clears it. */
625 s->suspend_cause |= cause;
626 s->monitor_source->suspend_cause |= cause;
628 s->suspend_cause &= ~cause;
629 s->monitor_source->suspend_cause &= ~cause;
/* If the current suspended-ness already matches what the cause bits
 * demand, there is nothing to change. */
632 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
635 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
637 if (s->suspend_cause)
638 return sink_set_state(s, PA_SINK_SUSPENDED);
/* No causes left: resume to RUNNING or IDLE depending on usage. */
640 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
643 /* Called from main context */
/* Begin moving all movable inputs away from this sink: each input that
 * pa_sink_input_start_move() accepts is detached (with a reference held)
 * and collected on the returned queue for a later _finish or _fail. */
644 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
645 pa_sink_input *i, *n;
648 pa_sink_assert_ref(s);
649 pa_assert_ctl_context();
650 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the next entry before acting on the current one, since
 * start_move removes the input from s->inputs while we iterate. */
655 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
656 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
/* Hold a reference across the move; it travels with the queue entry
 * (NOTE(review): the push onto q is in an elided line). */
658 pa_sink_input_ref(i);
660 if (pa_sink_input_start_move(i) >= 0)
/* Move refused (e.g. DONT_MOVE): drop our reference again. */
663 pa_sink_input_unref(i);
669 /* Called from main context */
/* Complete a move started with pa_sink_move_all_start(): reattach every
 * queued input to sink s. 'save' marks the new routing as user-chosen so
 * it can be restored later. Consumes and frees the queue. */
670 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
673 pa_sink_assert_ref(s);
674 pa_assert_ctl_context();
675 pa_assert(PA_SINK_IS_LINKED(s->state));
678 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
/* If the input cannot be attached to the new sink, let it handle the
 * failure (typically by being killed or rescued elsewhere). */
679 if (pa_sink_input_finish_move(i, s, save) < 0)
680 pa_sink_input_fail_move(i);
/* Drop the reference taken in pa_sink_move_all_start(). */
682 pa_sink_input_unref(i);
685 pa_queue_free(q, NULL, NULL);
688 /* Called from main context */
/* Abort a move started with pa_sink_move_all_start(): every queued input
 * is told the move failed, and the references taken at start time are
 * released. Consumes and frees the queue. */
689 void pa_sink_move_all_fail(pa_queue *q) {
692 pa_assert_ctl_context();
695 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
696 pa_sink_input_fail_move(i);
697 pa_sink_input_unref(i);
700 pa_queue_free(q, NULL, NULL);
703 /* Called from IO thread context */
704 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
708 pa_sink_assert_ref(s);
709 pa_sink_assert_io_context(s);
710 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
712 /* If nobody requested this and this is actually no real rewind
713 * then we can short cut this. Please note that this means that
714 * not all rewind requests triggered upstream will always be
715 * translated in actual requests! */
716 if (!s->thread_info.rewind_requested && nbytes <= 0)
719 s->thread_info.rewind_nbytes = 0;
720 s->thread_info.rewind_requested = FALSE;
722 if (s->thread_info.state == PA_SINK_SUSPENDED)
726 pa_log_debug("Processing rewind...");
728 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
729 pa_sink_input_assert_ref(i);
730 pa_sink_input_process_rewind(i, nbytes);
734 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
735 pa_source_process_rewind(s->monitor_source, nbytes);
738 /* Called from IO thread context */
739 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
743 size_t mixlength = *length;
745 pa_sink_assert_ref(s);
746 pa_sink_assert_io_context(s);
749 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
750 pa_sink_input_assert_ref(i);
752 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
754 if (mixlength == 0 || info->chunk.length < mixlength)
755 mixlength = info->chunk.length;
757 if (pa_memblock_is_silence(info->chunk.memblock)) {
758 pa_memblock_unref(info->chunk.memblock);
762 info->userdata = pa_sink_input_ref(i);
764 pa_assert(info->chunk.memblock);
765 pa_assert(info->chunk.length > 0);
778 /* Called from IO thread context */
779 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
783 unsigned n_unreffed = 0;
785 pa_sink_assert_ref(s);
786 pa_sink_assert_io_context(s);
788 pa_assert(result->memblock);
789 pa_assert(result->length > 0);
791 /* We optimize for the case where the order of the inputs has not changed */
793 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
795 pa_mix_info* m = NULL;
797 pa_sink_input_assert_ref(i);
799 /* Let's try to find the matching entry info the pa_mix_info array */
800 for (j = 0; j < n; j ++) {
802 if (info[p].userdata == i) {
813 pa_sink_input_drop(i, result->length);
815 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
817 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
822 if (m && m->chunk.memblock) {
824 pa_memblock_ref(c.memblock);
825 pa_assert(result->length <= c.length);
826 c.length = result->length;
828 pa_memchunk_make_writable(&c, 0);
829 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
832 pa_memblock_ref(c.memblock);
833 pa_assert(result->length <= c.length);
834 c.length = result->length;
837 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
838 pa_source_output_assert_ref(o);
839 pa_assert(o->direct_on_input == i);
840 pa_source_post_direct(s->monitor_source, o, &c);
843 pa_memblock_unref(c.memblock);
848 if (m->chunk.memblock)
849 pa_memblock_unref(m->chunk.memblock);
850 pa_memchunk_reset(&m->chunk);
852 pa_sink_input_unref(m->userdata);
859 /* Now drop references to entries that are included in the
860 * pa_mix_info array but don't exist anymore */
862 if (n_unreffed < n) {
863 for (; n > 0; info++, n--) {
865 pa_sink_input_unref(info->userdata);
866 if (info->chunk.memblock)
867 pa_memblock_unref(info->chunk.memblock);
871 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
872 pa_source_post(s->monitor_source, result);
875 /* Called from IO thread context */
876 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
877 pa_mix_info info[MAX_MIX_CHANNELS];
879 size_t block_size_max;
881 pa_sink_assert_ref(s);
882 pa_sink_assert_io_context(s);
883 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
884 pa_assert(pa_frame_aligned(length, &s->sample_spec));
889 pa_assert(!s->thread_info.rewind_requested);
890 pa_assert(s->thread_info.rewind_nbytes == 0);
892 if (s->thread_info.state == PA_SINK_SUSPENDED) {
893 result->memblock = pa_memblock_ref(s->silence.memblock);
894 result->index = s->silence.index;
895 result->length = PA_MIN(s->silence.length, length);
902 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
904 block_size_max = pa_mempool_block_size_max(s->core->mempool);
905 if (length > block_size_max)
906 length = pa_frame_align(block_size_max, &s->sample_spec);
908 pa_assert(length > 0);
910 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
914 *result = s->silence;
915 pa_memblock_ref(result->memblock);
917 if (result->length > length)
918 result->length = length;
923 *result = info[0].chunk;
924 pa_memblock_ref(result->memblock);
926 if (result->length > length)
927 result->length = length;
929 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
931 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
932 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
933 pa_memblock_unref(result->memblock);
934 pa_silence_memchunk_get(&s->core->silence_cache,
940 pa_memchunk_make_writable(result, 0);
941 pa_volume_memchunk(result, &s->sample_spec, &volume);
946 result->memblock = pa_memblock_new(s->core->mempool, length);
948 ptr = pa_memblock_acquire(result->memblock);
949 result->length = pa_mix(info, n,
952 &s->thread_info.soft_volume,
953 s->thread_info.soft_muted);
954 pa_memblock_release(result->memblock);
959 inputs_drop(s, info, n, result);
964 /* Called from IO thread context */
965 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
966 pa_mix_info info[MAX_MIX_CHANNELS];
968 size_t length, block_size_max;
970 pa_sink_assert_ref(s);
971 pa_sink_assert_io_context(s);
972 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
974 pa_assert(target->memblock);
975 pa_assert(target->length > 0);
976 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
980 pa_assert(!s->thread_info.rewind_requested);
981 pa_assert(s->thread_info.rewind_nbytes == 0);
983 if (s->thread_info.state == PA_SINK_SUSPENDED) {
984 pa_silence_memchunk(target, &s->sample_spec);
989 length = target->length;
990 block_size_max = pa_mempool_block_size_max(s->core->mempool);
991 if (length > block_size_max)
992 length = pa_frame_align(block_size_max, &s->sample_spec);
994 pa_assert(length > 0);
996 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
999 if (target->length > length)
1000 target->length = length;
1002 pa_silence_memchunk(target, &s->sample_spec);
1003 } else if (n == 1) {
1006 if (target->length > length)
1007 target->length = length;
1009 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1011 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1012 pa_silence_memchunk(target, &s->sample_spec);
1016 vchunk = info[0].chunk;
1017 pa_memblock_ref(vchunk.memblock);
1019 if (vchunk.length > length)
1020 vchunk.length = length;
1022 if (!pa_cvolume_is_norm(&volume)) {
1023 pa_memchunk_make_writable(&vchunk, 0);
1024 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1027 pa_memchunk_memcpy(target, &vchunk);
1028 pa_memblock_unref(vchunk.memblock);
1034 ptr = pa_memblock_acquire(target->memblock);
1036 target->length = pa_mix(info, n,
1037 (uint8_t*) ptr + target->index, length,
1039 &s->thread_info.soft_volume,
1040 s->thread_info.soft_muted);
1042 pa_memblock_release(target->memblock);
1045 inputs_drop(s, info, n, target);
1050 /* Called from IO thread context */
1051 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1055 pa_sink_assert_ref(s);
1056 pa_sink_assert_io_context(s);
1057 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1059 pa_assert(target->memblock);
1060 pa_assert(target->length > 0);
1061 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1065 pa_assert(!s->thread_info.rewind_requested);
1066 pa_assert(s->thread_info.rewind_nbytes == 0);
1075 pa_sink_render_into(s, &chunk);
1084 /* Called from IO thread context */
1085 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1086 pa_mix_info info[MAX_MIX_CHANNELS];
1087 size_t length1st = length;
1090 pa_sink_assert_ref(s);
1091 pa_sink_assert_io_context(s);
1092 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1093 pa_assert(length > 0);
1094 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1099 pa_assert(!s->thread_info.rewind_requested);
1100 pa_assert(s->thread_info.rewind_nbytes == 0);
1102 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1103 pa_silence_memchunk_get(&s->core->silence_cache,
1113 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
1116 pa_silence_memchunk_get(&s->core->silence_cache,
1121 } else if (n == 1) {
1124 *result = info[0].chunk;
1125 pa_memblock_ref(result->memblock);
1127 if (result->length > length)
1128 result->length = length;
1130 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1132 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
1133 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1134 pa_memblock_unref(result->memblock);
1135 pa_silence_memchunk_get(&s->core->silence_cache,
1141 pa_memchunk_make_writable(result, length);
1142 pa_volume_memchunk(result, &s->sample_spec, &volume);
1149 result->memblock = pa_memblock_new(s->core->mempool, length);
1151 ptr = pa_memblock_acquire(result->memblock);
1153 result->length = pa_mix(info, n,
1154 (uint8_t*) ptr + result->index, length1st,
1156 &s->thread_info.soft_volume,
1157 s->thread_info.soft_muted);
1159 pa_memblock_release(result->memblock);
1162 inputs_drop(s, info, n, result);
1164 if (result->length < length) {
1167 pa_memchunk_make_writable(result, length);
1169 l = length - result->length;
1170 d = result->index + result->length;
1176 pa_sink_render_into(s, &chunk);
1181 result->length = length;
1187 /* Called from main thread */
/* Query the sink's current playback latency by sending a synchronous
 * GET_LATENCY message to the IO thread. Returns the latency in usec
 * (NOTE(review): the early-return values for the two guard cases below —
 * presumably 0 — are in elided lines). */
1188 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1191 pa_sink_assert_ref(s);
1192 pa_assert_ctl_context();
1193 pa_assert(PA_SINK_IS_LINKED(s->state));
1195 /* The returned value is supposed to be in the time domain of the sound card! */
/* A suspended sink is not producing audio, so there is no meaningful
 * latency to report. */
1197 if (s->state == PA_SINK_SUSPENDED)
/* Sinks without PA_SINK_LATENCY cannot report latency at all. */
1200 if (!(s->flags & PA_SINK_LATENCY))
/* Blocks until the IO thread has filled in usec. */
1203 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1208 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): obtains the latency by
 * invoking the message handler directly instead of going through the
 * asyncmsgq, since we are already on the IO thread. */
1209 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1213 pa_sink_assert_ref(s);
1214 pa_sink_assert_io_context(s);
1215 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1217 /* The returned value is supposed to be in the time domain of the sound card! */
/* Same guards as the main-thread variant: no latency while suspended or
 * when the sink cannot report latency. */
1219 if (s->thread_info.state == PA_SINK_SUSPENDED)
1222 if (!(s->flags & PA_SINK_LATENCY))
1225 o = PA_MSGOBJECT(s);
1227 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1229 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1235 /* Called from main context */
1236 static void compute_reference_ratios(pa_sink *s) {
1240 pa_sink_assert_ref(s);
1241 pa_assert_ctl_context();
1242 pa_assert(PA_SINK_IS_LINKED(s->state));
1243 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1245 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1247 pa_cvolume remapped;
1250 * Calculates the reference volume from the sink's reference
1251 * volume. This basically calculates:
1253 * i->reference_ratio = i->volume / s->reference_volume
1256 remapped = s->reference_volume;
1257 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1259 i->reference_ratio.channels = i->sample_spec.channels;
1261 for (c = 0; c < i->sample_spec.channels; c++) {
1263 /* We don't update when the sink volume is 0 anyway */
1264 if (remapped.values[c] <= PA_VOLUME_MUTED)
1267 /* Don't update the reference ratio unless necessary */
1268 if (pa_sw_volume_multiply(
1269 i->reference_ratio.values[c],
1270 remapped.values[c]) == i->volume.values[c])
1273 i->reference_ratio.values[c] = pa_sw_volume_divide(
1274 i->volume.values[c],
1275 remapped.values[c]);
1280 /* Called from main context */
1281 static void compute_real_ratios(pa_sink *s) {
1285 pa_sink_assert_ref(s);
1286 pa_assert_ctl_context();
1287 pa_assert(PA_SINK_IS_LINKED(s->state));
1288 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1290 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1292 pa_cvolume remapped;
1295 * This basically calculates:
1297 * i->real_ratio := i->volume / s->real_volume
1298 * i->soft_volume := i->real_ratio * i->volume_factor
1301 remapped = s->real_volume;
1302 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1304 i->real_ratio.channels = i->sample_spec.channels;
1305 i->soft_volume.channels = i->sample_spec.channels;
1307 for (c = 0; c < i->sample_spec.channels; c++) {
1309 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1310 /* We leave i->real_ratio untouched */
1311 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1315 /* Don't lose accuracy unless necessary */
1316 if (pa_sw_volume_multiply(
1317 i->real_ratio.values[c],
1318 remapped.values[c]) != i->volume.values[c])
1320 i->real_ratio.values[c] = pa_sw_volume_divide(
1321 i->volume.values[c],
1322 remapped.values[c]);
1324 i->soft_volume.values[c] = pa_sw_volume_multiply(
1325 i->real_ratio.values[c],
1326 i->volume_factor.values[c]);
1329 /* We don't copy the soft_volume to the thread_info data
1330 * here. That must be done by the caller */
1334 /* Called from main thread */
/* Flat-volume helper: set s->real_volume to the per-channel maximum of
 * all connected inputs' volumes (remapped to the sink's channel map),
 * then recompute each input's real ratio / soft volume against it. */
1335 static void compute_real_volume(pa_sink *s) {
1339 pa_sink_assert_ref(s);
1340 pa_assert_ctl_context();
1341 pa_assert(PA_SINK_IS_LINKED(s->state));
1342 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1344 /* This determines the maximum volume of all streams and sets
1345 * s->real_volume accordingly. */
1347 if (pa_idxset_isempty(s->inputs)) {
1348 /* In the special case that we have no sink input we leave the
1349 * volume unmodified. */
1350 s->real_volume = s->reference_volume;
/* Start from silence so the merge below yields the plain maximum. */
1354 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1356 /* First let's determine the new maximum volume of all inputs
1357 * connected to this sink */
1358 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1359 pa_cvolume remapped;
1361 remapped = i->volume;
1362 pa_cvolume_remap(&remapped, &i->channel_map, &s->channel_map);
/* pa_cvolume_merge() keeps the per-channel maximum of both operands. */
1363 pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
1366 /* Then, let's update the real ratios/soft volumes of all inputs
1367 * connected to this sink */
1368 compute_real_ratios(s);
1371 /* Called from main thread */
/* Push a sink-level reference volume change down to the streams:
 * recompute each input's volume from the sink's reference volume and the
 * input's reference_ratio, and notify listeners of any change. */
1372 static void propagate_reference_volume(pa_sink *s) {
1376     pa_sink_assert_ref(s);
1377     pa_assert_ctl_context();
1378     pa_assert(PA_SINK_IS_LINKED(s->state));
1379     pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1381     /* This is called whenever the sink volume changes that is not
1382      * caused by a sink input volume change. We need to fix up the
1383      * sink input volumes accordingly */
1385     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1386         pa_cvolume old_volume, remapped;
1388         old_volume = i->volume;
1390         /* This basically calculates:
1392          * i->volume := s->reference_volume * i->reference_ratio */
1394         remapped = s->reference_volume;
1395         pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1396         pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1398         /* The volume changed, let's tell people so */
1399         if (!pa_cvolume_equal(&old_volume, &i->volume)) {
             /* Per-input callback first, then the global subscription event. */
1401             if (i->volume_changed)
1402                 i->volume_changed(i);
1404             pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1409 /* Called from main thread */
/* Set the sink's volume.
 *
 * volume: new reference volume, or NULL to re-synchronize the sink's
 *         reference/real volumes from the current stream volumes
 *         (only valid for flat-volume sinks).
 * save:   whether the new volume should be remembered persistently.
 *
 * After updating the control-thread state, the change is pushed to the
 * IO thread via PA_SINK_MESSAGE_SET_VOLUME and a subscription event is
 * posted if the reference volume actually changed.
 * NOTE(review): several original lines (parameter list continuation,
 * if/else braces) are elided in this view; code kept verbatim. */
1410 void pa_sink_set_volume(
1412         const pa_cvolume *volume,
1416     pa_cvolume old_reference_volume;
1417     pa_bool_t reference_changed;
1419     pa_sink_assert_ref(s);
1420     pa_assert_ctl_context();
1421     pa_assert(PA_SINK_IS_LINKED(s->state));
1422     pa_assert(!volume || pa_cvolume_valid(volume));
1423     pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));
1424     pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1426     /* As a special exception we accept mono volumes on all sinks --
1427      * even on those with more complex channel maps */
1429     /* If volume is NULL we synchronize the sink's real and reference
1430      * volumes with the stream volumes. If it is not NULL we update
1431      * the reference_volume with it. */
1433     old_reference_volume = s->reference_volume;
1437         if (pa_cvolume_compatible(volume, &s->sample_spec))
1438             s->reference_volume = *volume;
         /* Mono special case: scale all channels to the given level. */
1440             pa_cvolume_scale(&s->reference_volume, pa_cvolume_max(volume));
1442         if (s->flags & PA_SINK_FLAT_VOLUME) {
1443             /* OK, propagate this volume change back to the inputs */
1444             propagate_reference_volume(s);
1446             /* And now recalculate the real volume */
1447             compute_real_volume(s);
         /* Non-flat sinks: real volume simply follows the reference. */
1449             s->real_volume = s->reference_volume;
     /* volume == NULL path: derive everything from the streams. */
1452         pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1454         /* Ok, let's determine the new real volume */
1455         compute_real_volume(s);
1457         /* Let's 'push' the reference volume if necessary */
1458         pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);
1460         /* We need to fix the reference ratios of all streams now that
1461          * we changed the reference volume */
1462         compute_reference_ratios(s);
1465     reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
1466     s->save_volume = (!reference_changed && s->save_volume) || save;
1468     if (s->set_volume) {
1469         /* If we have a function set_volume(), then we do not apply a
1470          * soft volume by default. However, set_volume() is free to
1471          * apply one to s->soft_volume */
1473         pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1477         /* If we have no function set_volume(), then the soft volume
1478          * becomes the virtual volume */
1479         s->soft_volume = s->real_volume;
1481     /* This tells the sink that soft and/or virtual volume changed */
1483     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1485     if (reference_changed)
1486         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1489 /* Called from main thread. Only to be called by sink implementor */
/* Set the sink's software (attenuation) volume directly; NULL resets it
 * to neutral. For a linked sink the value is handed to the IO thread via
 * PA_SINK_MESSAGE_SET_VOLUME; before linking it is copied into
 * thread_info directly (the IO thread is not running yet). */
1490 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1491     pa_sink_assert_ref(s);
1492     pa_assert_ctl_context();
1495         pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1497         s->soft_volume = *volume;
1499     if (PA_SINK_IS_LINKED(s->state))
1500         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1502         s->thread_info.soft_volume = s->soft_volume;
/* React to a hardware-driven change of the real volume: adopt the new
 * real volume as the reference volume, rebuild the stream volumes from
 * the (unchanged) ratios, mark the volume for saving, and notify
 * subscribers. old_real_volume is the value before the change, used to
 * short-circuit when nothing actually changed. */
1505 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
1508     pa_cvolume old_reference_volume;
1510     pa_sink_assert_ref(s);
1511     pa_assert_ctl_context();
1512     pa_assert(PA_SINK_IS_LINKED(s->state));
1514     /* This is called when the hardware's real volume changes due to
1515      * some external event. We copy the real volume into our
1516      * reference volume and then rebuild the stream volumes based on
1517      * i->real_ratio which should stay fixed. */
1519     if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1522     old_reference_volume = s->reference_volume;
1524     /* 1. Make the real volume the reference volume */
1525     s->reference_volume = s->real_volume;
1527     if (s->flags & PA_SINK_FLAT_VOLUME) {
1529         PA_IDXSET_FOREACH(i, s->inputs, idx) {
1530             pa_cvolume old_volume, remapped;
1532             old_volume = i->volume;
1534             /* 2. Since the sink's reference and real volumes are equal
1535              * now our ratios should be too. */
1536             i->reference_ratio = i->real_ratio;
1538             /* 3. Recalculate the new stream reference volume based on the
1539              * reference ratio and the sink's reference volume.
1541              * This basically calculates:
1543              * i->volume = s->reference_volume * i->reference_ratio
1545              * This is identical to propagate_reference_volume() */
1546             remapped = s->reference_volume;
1547             pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1548             pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1550             /* Notify if something changed */
1551             if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1553                 if (i->volume_changed)
1554                     i->volume_changed(i);
1556                 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1561     /* Something got changed in the hardware. It probably makes sense
1562      * to save changed hw settings given that hw volume changes not
1563      * triggered by PA are almost certainly done by the user. */
1564     s->save_volume = TRUE;
1566     if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
1567         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1570 /* Called from main thread */
/* Return the sink's reference volume. If refresh_volume is set on the
 * sink or force_refresh is requested, first ask the IO thread for the
 * current hardware volume (PA_SINK_MESSAGE_GET_VOLUME) and propagate
 * any external change via propagate_real_volume(). */
1571 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1572     pa_sink_assert_ref(s);
1573     pa_assert_ctl_context();
1574     pa_assert(PA_SINK_IS_LINKED(s->state));
1576     if (s->refresh_volume || force_refresh) {
1577         struct pa_cvolume old_real_volume;
1579         old_real_volume = s->real_volume;
1584         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1586         propagate_real_volume(s, &old_real_volume);
1589     return &s->reference_volume;
1592 /* Called from main thread */
/* Entry point for sink implementors: record an externally observed real
 * volume and let propagate_real_volume() update reference volume,
 * stream volumes and subscribers accordingly. */
1593 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1594     pa_cvolume old_real_volume;
1596     pa_sink_assert_ref(s);
1597     pa_assert_ctl_context();
1598     pa_assert(PA_SINK_IS_LINKED(s->state));
1600     /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1602     old_real_volume = s->real_volume;
1603     s->real_volume = *new_real_volume;
1605     propagate_real_volume(s, &old_real_volume);
1608 /* Called from main thread */
/* Set the sink's mute state, update the save flag, forward the change
 * to the IO thread (PA_SINK_MESSAGE_SET_MUTE) and post a subscription
 * event when the state actually changed.
 * NOTE(review): the assignment of the new mute value to s->muted is on
 * a line elided from this view (between lines 1616 and 1618). */
1609 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1610     pa_bool_t old_muted;
1612     pa_sink_assert_ref(s);
1613     pa_assert_ctl_context();
1614     pa_assert(PA_SINK_IS_LINKED(s->state));
1616     old_muted = s->muted;
1618     s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1623     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1625     if (old_muted != s->muted)
1626         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1629 /* Called from main thread */
/* Return the sink's mute state. When refresh_muted or force_refresh is
 * set, query the IO thread first (PA_SINK_MESSAGE_GET_MUTE); if the
 * hardware state differs from ours, adopt it, mark it for saving,
 * notify subscribers and push the soft-mute state back down so both
 * sides stay in sync. */
1630 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1632     pa_sink_assert_ref(s);
1633     pa_assert_ctl_context();
1634     pa_assert(PA_SINK_IS_LINKED(s->state));
1636     if (s->refresh_muted || force_refresh) {
1637         pa_bool_t old_muted = s->muted;
1642         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1644         if (old_muted != s->muted) {
1645             s->save_muted = TRUE;
1647             pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1649             /* Make sure the soft mute status stays in sync */
1650             pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1657 /* Called from main thread */
1658 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1659 pa_sink_assert_ref(s);
1660 pa_assert_ctl_context();
1661 pa_assert(PA_SINK_IS_LINKED(s->state));
1663 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1665 if (s->muted == new_muted)
1668 s->muted = new_muted;
1669 s->save_muted = TRUE;
1671 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1674 /* Called from main thread */
/* Merge property list p into the sink's proplist according to mode and,
 * for a linked sink, fire the PROPLIST_CHANGED hook and post a
 * subscription event. */
1675 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1676     pa_sink_assert_ref(s);
1677     pa_assert_ctl_context();
1680         pa_proplist_update(s->proplist, mode, p);
1682     if (PA_SINK_IS_LINKED(s->state)) {
1683         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1684         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1690 /* Called from main thread */
1691 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Set (or, with NULL, unset) the sink's human-readable description
 * property, keep the monitor source's description in step, and notify
 * hooks/subscribers when the sink is linked. No-op when unchanged. */
1692 void pa_sink_set_description(pa_sink *s, const char *description) {
1694     pa_sink_assert_ref(s);
1695     pa_assert_ctl_context();
1697     if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1700     old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1702     if (old && description && pa_streq(old, description))
1706         pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1708         pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1710     if (s->monitor_source) {
         /* Derive the monitor source's description from ours. */
1713         n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1714         pa_source_set_description(s->monitor_source, n);
1718     if (PA_SINK_IS_LINKED(s->state)) {
1719         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1720         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1724 /* Called from main thread */
/* Return the total number of streams linked to this sink, including
 * streams connected to its monitor source. */
1725 unsigned pa_sink_linked_by(pa_sink *s) {
1728     pa_sink_assert_ref(s);
1729     pa_assert_ctl_context();
1730     pa_assert(PA_SINK_IS_LINKED(s->state));
1732     ret = pa_idxset_size(s->inputs);
1734     /* We add in the number of streams connected to us here. Please
1735      * note the asymmetry to pa_sink_used_by()! */
1737     if (s->monitor_source)
1738         ret += pa_source_linked_by(s->monitor_source);
1743 /* Called from main thread */
1744 unsigned pa_sink_used_by(pa_sink *s) {
1747 pa_sink_assert_ref(s);
1748 pa_assert_ctl_context();
1749 pa_assert(PA_SINK_IS_LINKED(s->state));
1751 ret = pa_idxset_size(s->inputs);
1752 pa_assert(ret >= s->n_corked);
1754 /* Streams connected to our monitor source do not matter for
1755 * pa_sink_used_by()!.*/
1757 return ret - s->n_corked;
1760 /* Called from main thread */
/* Count the streams that keep this sink from auto-suspending: corked
 * streams and streams flagged DONT_INHIBIT_AUTO_SUSPEND are skipped,
 * and the monitor source's own count is added in. A non-zero result
 * means the sink should stay running.
 * NOTE(review): the early-return for unlinked sinks and the skip/count
 * statements inside the loop are on lines elided from this view. */
1761 unsigned pa_sink_check_suspend(pa_sink *s) {
1766     pa_sink_assert_ref(s);
1767     pa_assert_ctl_context();
1769     if (!PA_SINK_IS_LINKED(s->state))
1774     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1775         pa_sink_input_state_t st;
1777         st = pa_sink_input_get_state(i);
1778         pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1780         if (st == PA_SINK_INPUT_CORKED)
1783         if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1789     if (s->monitor_source)
1790         ret += pa_source_check_suspend(s->monitor_source);
1795 /* Called from the IO thread */
/* Copy each input's control-thread soft_volume into its thread_info
 * copy and request a full rewind so the new volume takes effect on
 * already-rendered audio. Inputs whose volume is unchanged are skipped. */
1796 static void sync_input_volumes_within_thread(pa_sink *s) {
1800     pa_sink_assert_ref(s);
1801     pa_sink_assert_io_context(s);
1803     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1804         if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1807         i->thread_info.soft_volume = i->soft_volume;
1808         pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1812 /* Called from IO thread, except when it is not */
1813 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1814 pa_sink *s = PA_SINK(o);
1815 pa_sink_assert_ref(s);
1817 switch ((pa_sink_message_t) code) {
1819 case PA_SINK_MESSAGE_ADD_INPUT: {
1820 pa_sink_input *i = PA_SINK_INPUT(userdata);
1822 /* If you change anything here, make sure to change the
1823 * sink input handling a few lines down at
1824 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1826 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1828 /* Since the caller sleeps in pa_sink_input_put(), we can
1829 * safely access data outside of thread_info even though
1832 if ((i->thread_info.sync_prev = i->sync_prev)) {
1833 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1834 pa_assert(i->sync_prev->sync_next == i);
1835 i->thread_info.sync_prev->thread_info.sync_next = i;
1838 if ((i->thread_info.sync_next = i->sync_next)) {
1839 pa_assert(i->sink == i->thread_info.sync_next->sink);
1840 pa_assert(i->sync_next->sync_prev == i);
1841 i->thread_info.sync_next->thread_info.sync_prev = i;
1844 pa_assert(!i->thread_info.attached);
1845 i->thread_info.attached = TRUE;
1850 pa_sink_input_set_state_within_thread(i, i->state);
1852 /* The requested latency of the sink input needs to be
1853 * fixed up and then configured on the sink */
1855 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1856 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1858 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1859 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1861 /* We don't rewind here automatically. This is left to the
1862 * sink input implementor because some sink inputs need a
1863 * slow start, i.e. need some time to buffer client
1864 * samples before beginning streaming. */
1866 /* In flat volume mode we need to update the volume as
1868 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1871 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1872 pa_sink_input *i = PA_SINK_INPUT(userdata);
1874 /* If you change anything here, make sure to change the
1875 * sink input handling a few lines down at
1876 * PA_SINK_MESSAGE_PREPAPRE_MOVE, too. */
1881 pa_sink_input_set_state_within_thread(i, i->state);
1883 pa_assert(i->thread_info.attached);
1884 i->thread_info.attached = FALSE;
1886 /* Since the caller sleeps in pa_sink_input_unlink(),
1887 * we can safely access data outside of thread_info even
1888 * though it is mutable */
1890 pa_assert(!i->sync_prev);
1891 pa_assert(!i->sync_next);
1893 if (i->thread_info.sync_prev) {
1894 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1895 i->thread_info.sync_prev = NULL;
1898 if (i->thread_info.sync_next) {
1899 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1900 i->thread_info.sync_next = NULL;
1903 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1904 pa_sink_input_unref(i);
1906 pa_sink_invalidate_requested_latency(s, TRUE);
1907 pa_sink_request_rewind(s, (size_t) -1);
1909 /* In flat volume mode we need to update the volume as
1911 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1914 case PA_SINK_MESSAGE_START_MOVE: {
1915 pa_sink_input *i = PA_SINK_INPUT(userdata);
1917 /* We don't support moving synchronized streams. */
1918 pa_assert(!i->sync_prev);
1919 pa_assert(!i->sync_next);
1920 pa_assert(!i->thread_info.sync_next);
1921 pa_assert(!i->thread_info.sync_prev);
1923 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1925 size_t sink_nbytes, total_nbytes;
1927 /* Get the latency of the sink */
1928 usec = pa_sink_get_latency_within_thread(s);
1929 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1930 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1932 if (total_nbytes > 0) {
1933 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1934 i->thread_info.rewrite_flush = TRUE;
1935 pa_sink_input_process_rewind(i, sink_nbytes);
1942 pa_assert(i->thread_info.attached);
1943 i->thread_info.attached = FALSE;
1945 /* Let's remove the sink input ...*/
1946 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1947 pa_sink_input_unref(i);
1949 pa_sink_invalidate_requested_latency(s, TRUE);
1951 pa_log_debug("Requesting rewind due to started move");
1952 pa_sink_request_rewind(s, (size_t) -1);
1954 /* In flat volume mode we need to update the volume as
1956 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1959 case PA_SINK_MESSAGE_FINISH_MOVE: {
1960 pa_sink_input *i = PA_SINK_INPUT(userdata);
1962 /* We don't support moving synchronized streams. */
1963 pa_assert(!i->sync_prev);
1964 pa_assert(!i->sync_next);
1965 pa_assert(!i->thread_info.sync_next);
1966 pa_assert(!i->thread_info.sync_prev);
1968 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1970 pa_assert(!i->thread_info.attached);
1971 i->thread_info.attached = TRUE;
1976 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1977 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1979 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1980 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1982 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1986 /* Get the latency of the sink */
1987 usec = pa_sink_get_latency_within_thread(s);
1988 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1991 pa_sink_input_drop(i, nbytes);
1993 pa_log_debug("Requesting rewind due to finished move");
1994 pa_sink_request_rewind(s, nbytes);
1997 /* In flat volume mode we need to update the volume as
1999 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
2002 case PA_SINK_MESSAGE_SET_VOLUME:
2004 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2005 s->thread_info.soft_volume = s->soft_volume;
2006 pa_sink_request_rewind(s, (size_t) -1);
2009 if (!(s->flags & PA_SINK_FLAT_VOLUME))
2012 /* Fall through ... */
2014 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2015 sync_input_volumes_within_thread(s);
2018 case PA_SINK_MESSAGE_GET_VOLUME:
2021 case PA_SINK_MESSAGE_SET_MUTE:
2023 if (s->thread_info.soft_muted != s->muted) {
2024 s->thread_info.soft_muted = s->muted;
2025 pa_sink_request_rewind(s, (size_t) -1);
2030 case PA_SINK_MESSAGE_GET_MUTE:
2033 case PA_SINK_MESSAGE_SET_STATE: {
2035 pa_bool_t suspend_change =
2036 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2037 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2039 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2041 if (s->thread_info.state == PA_SINK_SUSPENDED) {
2042 s->thread_info.rewind_nbytes = 0;
2043 s->thread_info.rewind_requested = FALSE;
2046 if (suspend_change) {
2050 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2051 if (i->suspend_within_thread)
2052 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2058 case PA_SINK_MESSAGE_DETACH:
2060 /* Detach all streams */
2061 pa_sink_detach_within_thread(s);
2064 case PA_SINK_MESSAGE_ATTACH:
2066 /* Reattach all streams */
2067 pa_sink_attach_within_thread(s);
2070 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2072 pa_usec_t *usec = userdata;
2073 *usec = pa_sink_get_requested_latency_within_thread(s);
2075 /* Yes, that's right, the IO thread will see -1 when no
2076 * explicit requested latency is configured, the main
2077 * thread will see max_latency */
2078 if (*usec == (pa_usec_t) -1)
2079 *usec = s->thread_info.max_latency;
2084 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2085 pa_usec_t *r = userdata;
2087 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2092 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2093 pa_usec_t *r = userdata;
2095 r[0] = s->thread_info.min_latency;
2096 r[1] = s->thread_info.max_latency;
2101 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2103 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2106 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2108 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2111 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2113 *((size_t*) userdata) = s->thread_info.max_rewind;
2116 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2118 *((size_t*) userdata) = s->thread_info.max_request;
2121 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2123 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2126 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2128 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2131 case PA_SINK_MESSAGE_GET_LATENCY:
2132 case PA_SINK_MESSAGE_MAX:
2139 /* Called from main thread */
2140 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2145 pa_core_assert_ref(c);
2146 pa_assert_ctl_context();
2147 pa_assert(cause != 0);
2149 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2152 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2159 /* Called from main thread */
2160 void pa_sink_detach(pa_sink *s) {
2161 pa_sink_assert_ref(s);
2162 pa_assert_ctl_context();
2163 pa_assert(PA_SINK_IS_LINKED(s->state));
2165 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2168 /* Called from main thread */
2169 void pa_sink_attach(pa_sink *s) {
2170 pa_sink_assert_ref(s);
2171 pa_assert_ctl_context();
2172 pa_assert(PA_SINK_IS_LINKED(s->state));
2174 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2177 /* Called from IO thread */
2178 void pa_sink_detach_within_thread(pa_sink *s) {
2182 pa_sink_assert_ref(s);
2183 pa_sink_assert_io_context(s);
2184 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2186 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2190 if (s->monitor_source)
2191 pa_source_detach_within_thread(s->monitor_source);
2194 /* Called from IO thread */
2195 void pa_sink_attach_within_thread(pa_sink *s) {
2199 pa_sink_assert_ref(s);
2200 pa_sink_assert_io_context(s);
2201 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2203 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2207 if (s->monitor_source)
2208 pa_source_attach_within_thread(s->monitor_source);
2211 /* Called from IO thread */
2212 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2213 pa_sink_assert_ref(s);
2214 pa_sink_assert_io_context(s);
2215 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2217 if (s->thread_info.state == PA_SINK_SUSPENDED)
2220 if (nbytes == (size_t) -1)
2221 nbytes = s->thread_info.max_rewind;
2223 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2225 if (s->thread_info.rewind_requested &&
2226 nbytes <= s->thread_info.rewind_nbytes)
2229 s->thread_info.rewind_nbytes = nbytes;
2230 s->thread_info.rewind_requested = TRUE;
2232 if (s->request_rewind)
2233 s->request_rewind(s);
2236 /* Called from IO thread */
2237 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2238 pa_usec_t result = (pa_usec_t) -1;
2241 pa_usec_t monitor_latency;
2243 pa_sink_assert_ref(s);
2244 pa_sink_assert_io_context(s);
2246 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2247 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2249 if (s->thread_info.requested_latency_valid)
2250 return s->thread_info.requested_latency;
2252 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2253 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2254 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2255 result = i->thread_info.requested_sink_latency;
2257 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2259 if (monitor_latency != (pa_usec_t) -1 &&
2260 (result == (pa_usec_t) -1 || result > monitor_latency))
2261 result = monitor_latency;
2263 if (result != (pa_usec_t) -1)
2264 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2266 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2267 /* Only cache if properly initialized */
2268 s->thread_info.requested_latency = result;
2269 s->thread_info.requested_latency_valid = TRUE;
2275 /* Called from main thread */
2276 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2279 pa_sink_assert_ref(s);
2280 pa_assert_ctl_context();
2281 pa_assert(PA_SINK_IS_LINKED(s->state));
2283 if (s->state == PA_SINK_SUSPENDED)
2286 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2290 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2291 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2295 pa_sink_assert_ref(s);
2296 pa_sink_assert_io_context(s);
2298 if (max_rewind == s->thread_info.max_rewind)
2301 s->thread_info.max_rewind = max_rewind;
2303 if (PA_SINK_IS_LINKED(s->thread_info.state))
2304 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2305 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2307 if (s->monitor_source)
2308 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2311 /* Called from main thread */
2312 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2313 pa_sink_assert_ref(s);
2314 pa_assert_ctl_context();
2316 if (PA_SINK_IS_LINKED(s->state))
2317 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2319 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2322 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2323 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2326 pa_sink_assert_ref(s);
2327 pa_sink_assert_io_context(s);
2329 if (max_request == s->thread_info.max_request)
2332 s->thread_info.max_request = max_request;
2334 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2337 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2338 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2342 /* Called from main thread */
2343 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2344 pa_sink_assert_ref(s);
2345 pa_assert_ctl_context();
2347 if (PA_SINK_IS_LINKED(s->state))
2348 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2350 pa_sink_set_max_request_within_thread(s, max_request);
2353 /* Called from IO thread */
2354 void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
2358 pa_sink_assert_ref(s);
2359 pa_sink_assert_io_context(s);
2361 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
2362 s->thread_info.requested_latency_valid = FALSE;
2366 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2368 if (s->update_requested_latency)
2369 s->update_requested_latency(s);
2371 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2372 if (i->update_sink_requested_latency)
2373 i->update_sink_requested_latency(i);
2377 /* Called from main thread */
2378 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2379 pa_sink_assert_ref(s);
2380 pa_assert_ctl_context();
2382 /* min_latency == 0: no limit
2383 * min_latency anything else: specified limit
2385 * Similar for max_latency */
2387 if (min_latency < ABSOLUTE_MIN_LATENCY)
2388 min_latency = ABSOLUTE_MIN_LATENCY;
2390 if (max_latency <= 0 ||
2391 max_latency > ABSOLUTE_MAX_LATENCY)
2392 max_latency = ABSOLUTE_MAX_LATENCY;
2394 pa_assert(min_latency <= max_latency);
2396 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2397 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2398 max_latency == ABSOLUTE_MAX_LATENCY) ||
2399 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2401 if (PA_SINK_IS_LINKED(s->state)) {
2407 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2409 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2412 /* Called from main thread */
2413 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2414 pa_sink_assert_ref(s);
2415 pa_assert_ctl_context();
2416 pa_assert(min_latency);
2417 pa_assert(max_latency);
2419 if (PA_SINK_IS_LINKED(s->state)) {
2420 pa_usec_t r[2] = { 0, 0 };
2422 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2424 *min_latency = r[0];
2425 *max_latency = r[1];
2427 *min_latency = s->thread_info.min_latency;
2428 *max_latency = s->thread_info.max_latency;
2432 /* Called from IO thread */
2433 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2434 pa_sink_assert_ref(s);
2435 pa_sink_assert_io_context(s);
2437 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2438 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2439 pa_assert(min_latency <= max_latency);
2441 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2442 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2443 max_latency == ABSOLUTE_MAX_LATENCY) ||
2444 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2446 if (s->thread_info.min_latency == min_latency &&
2447 s->thread_info.max_latency == max_latency)
2450 s->thread_info.min_latency = min_latency;
2451 s->thread_info.max_latency = max_latency;
2453 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2457 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2458 if (i->update_sink_latency_range)
2459 i->update_sink_latency_range(i);
2462 pa_sink_invalidate_requested_latency(s, FALSE);
2464 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2467 /* Called from main thread */
2468 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2469 pa_sink_assert_ref(s);
2470 pa_assert_ctl_context();
2472 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2473 pa_assert(latency == 0);
2477 if (latency < ABSOLUTE_MIN_LATENCY)
2478 latency = ABSOLUTE_MIN_LATENCY;
2480 if (latency > ABSOLUTE_MAX_LATENCY)
2481 latency = ABSOLUTE_MAX_LATENCY;
2483 if (PA_SINK_IS_LINKED(s->state))
2484 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2486 s->thread_info.fixed_latency = latency;
2488 pa_source_set_fixed_latency(s->monitor_source, latency);
2491 /* Called from main thread */
2492 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2495 pa_sink_assert_ref(s);
2496 pa_assert_ctl_context();
2498 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2501 if (PA_SINK_IS_LINKED(s->state))
2502 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2504 latency = s->thread_info.fixed_latency;
2509 /* Called from IO thread */
2510 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
2511 pa_sink_assert_ref(s);
2512 pa_sink_assert_io_context(s);
2514 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2515 pa_assert(latency == 0);
2519 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2520 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2522 if (s->thread_info.fixed_latency == latency)
2525 s->thread_info.fixed_latency = latency;
2527 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2531 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2532 if (i->update_sink_fixed_latency)
2533 i->update_sink_fixed_latency(i);
2536 pa_sink_invalidate_requested_latency(s, FALSE);
2538 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
2541 /* Called from main context */
2542 size_t pa_sink_get_max_rewind(pa_sink *s) {
2544 pa_sink_assert_ref(s);
2545 pa_assert_ctl_context();
2547 if (!PA_SINK_IS_LINKED(s->state))
2548 return s->thread_info.max_rewind;
2550 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2555 /* Called from main context */
2556 size_t pa_sink_get_max_request(pa_sink *s) {
2558 pa_sink_assert_ref(s);
2559 pa_assert_ctl_context();
2561 if (!PA_SINK_IS_LINKED(s->state))
2562 return s->thread_info.max_request;
2564 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2569 /* Called from main context */
2570 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2571 pa_device_port *port;
2573 pa_sink_assert_ref(s);
2574 pa_assert_ctl_context();
2577 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2578 return -PA_ERR_NOTIMPLEMENTED;
2582 return -PA_ERR_NOENTITY;
2584 if (!(port = pa_hashmap_get(s->ports, name)))
2585 return -PA_ERR_NOENTITY;
2587 if (s->active_port == port) {
2588 s->save_port = s->save_port || save;
2592 if ((s->set_port(s, port)) < 0)
2593 return -PA_ERR_NOENTITY;
2595 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2597 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2599 s->active_port = port;
2600 s->save_port = save;
2605 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2606 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2610 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2613 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2615 if (pa_streq(ff, "microphone"))
2616 t = "audio-input-microphone";
2617 else if (pa_streq(ff, "webcam"))
2619 else if (pa_streq(ff, "computer"))
2621 else if (pa_streq(ff, "handset"))
2623 else if (pa_streq(ff, "portable"))
2624 t = "multimedia-player";
2625 else if (pa_streq(ff, "tv"))
2626 t = "video-display";
2629 * The following icons are not part of the icon naming spec,
2630 * because Rodney Dawes sucks as the maintainer of that spec.
2632 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2634 else if (pa_streq(ff, "headset"))
2635 t = "audio-headset";
2636 else if (pa_streq(ff, "headphone"))
2637 t = "audio-headphones";
2638 else if (pa_streq(ff, "speaker"))
2639 t = "audio-speakers";
2640 else if (pa_streq(ff, "hands-free"))
2641 t = "audio-handsfree";
2645 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2646 if (pa_streq(c, "modem"))
2653 t = "audio-input-microphone";
2656 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2657 if (strstr(profile, "analog"))
2659 else if (strstr(profile, "iec958"))
2661 else if (strstr(profile, "hdmi"))
2665 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2667 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2672 pa_bool_t pa_device_init_description(pa_proplist *p) {
2673 const char *s, *d = NULL, *k;
2676 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2679 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2680 if (pa_streq(s, "internal"))
2681 d = _("Internal Audio");
2684 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2685 if (pa_streq(s, "modem"))
2689 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2694 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2697 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2699 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2704 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2708 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2711 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2712 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2713 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2720 unsigned pa_device_init_priority(pa_proplist *p) {
2722 unsigned priority = 0;
2726 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
2728 if (pa_streq(s, "sound"))
2730 else if (!pa_streq(s, "modem"))
2734 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2736 if (pa_streq(s, "internal"))
2738 else if (pa_streq(s, "speaker"))
2740 else if (pa_streq(s, "headphone"))
2744 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
2746 if (pa_streq(s, "pci"))
2748 else if (pa_streq(s, "usb"))
2750 else if (pa_streq(s, "bluetooth"))
2754 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2756 if (pa_startswith(s, "analog-"))
2758 else if (pa_startswith(s, "iec958-"))